PS C:\Users\test\Desktop\baize-chatbot-main\demo> python app.py decapoda-research/llama-7b-hf project-baize/baize-lora-7B
c:\users\test\anaconda3\lib\site-packages\bitsandbytes\cextension.py:34: UserWarning: The installed version of bitsandbytes was compiled without GPU support. 8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.
  warn("The installed version of bitsandbytes was compiled without GPU support. "
'NoneType' object has no attribute 'cadam32bit_grad_fp32'
c:\users\test\anaconda3\lib\site-packages\langchain\__init__.py:34: UserWarning: Importing Cohere from langchain root module is no longer supported. Please use langchain.llms.Cohere instead.
  warnings.warn(
c:\users\test\anaconda3\lib\site-packages\langchain\__init__.py:34: UserWarning: Importing LLMChain from langchain root module is no longer supported. Please use langchain.chains.LLMChain instead.
  warnings.warn(
c:\users\test\anaconda3\lib\site-packages\langchain\__init__.py:34: UserWarning: Importing OpenAI from langchain root module is no longer supported. Please use langchain.llms.OpenAI instead.
  warnings.warn(
Traceback (most recent call last):
  File "C:\Users\test\Desktop\baize-chatbot-main\demo\app.py", line 9, in <module>
    from app_modules.overwrites import *
  File "C:\Users\test\Desktop\baize-chatbot-main\demo\app_modules\overwrites.py", line 4, in <module>
    from llama_index import Prompt
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\__init__.py", line 18, in <module>
    from llama_index.indices.common.struct_store.base import SQLDocumentContextBuilder
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\__init__.py", line 4, in <module>
    from llama_index.indices.keyword_table.base import GPTKeywordTableIndex
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\keyword_table\__init__.py", line 4, in <module>
    from llama_index.indices.keyword_table.base import GPTKeywordTableIndex
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\keyword_table\base.py", line 18, in <module>
    from llama_index.indices.base import BaseGPTIndex
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\base.py", line 8, in <module>
    from llama_index.indices.base_retriever import BaseRetriever
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\base_retriever.py", line 5, in <module>
    from llama_index.indices.query.schema import QueryBundle, QueryType
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\query\__init__.py", line 3, in <module>
    from llama_index.indices.query.response_synthesis import ResponseSynthesizer
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\query\response_synthesis.py", line 5, in <module>
    from llama_index.indices.postprocessor.types import BaseNodePostprocessor
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\postprocessor\__init__.py", line 4, in <module>
    from llama_index.indices.postprocessor.node import (
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\postprocessor\node.py", line 13, in <module>
    from llama_index.indices.response import get_response_builder
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\response\__init__.py", line 3, in <module>
    from llama_index.indices.response.accumulate import Accumulate
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\response\accumulate.py", line 5, in <module>
    from llama_index.indices.response.base_builder import BaseResponseBuilder
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\response\base_builder.py", line 14, in <module>
    from llama_index.indices.service_context import ServiceContext
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\service_context.py", line 7, in <module>
    from llama_index.indices.prompt_helper import PromptHelper
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\prompt_helper.py", line 12, in <module>
    from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\langchain_helpers\chain_wrapper.py", line 6, in <module>
    from llama_index.llm_predictor.base import (  # noqa: F401
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\llm_predictor\__init__.py", line 4, in <module>
    from llama_index.llm_predictor.base import LLMPredictor
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\llm_predictor\base.py", line 11, in <module>
    from langchain import BaseCache, Cohere, LLMChain, OpenAI
ImportError: cannot import name 'BaseCache' from 'langchain' (c:\users\test\anaconda3\lib\site-packages\langchain\__init__.py)
PS C:\Users\test\Desktop\baize-chatbot-main\demo> python app.py decapoda-research/llama-7b-hf project-baize/baize-lora-7B
c:\users\test\anaconda3\lib\site-packages\bitsandbytes\cextension.py:34: UserWarning: The installed version of bitsandbytes was compiled without GPU support. 8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.
  warn("The installed version of bitsandbytes was compiled without GPU support. "
'NoneType' object has no attribute 'cadam32bit_grad_fp32'
c:\users\test\anaconda3\lib\site-packages\langchain\__init__.py:34: UserWarning: Importing Cohere from langchain root module is no longer supported. Please use langchain.llms.Cohere instead.
  warnings.warn(
c:\users\test\anaconda3\lib\site-packages\langchain\__init__.py:34: UserWarning: Importing LLMChain from langchain root module is no longer supported. Please use langchain.chains.LLMChain instead.
  warnings.warn(
c:\users\test\anaconda3\lib\site-packages\langchain\__init__.py:34: UserWarning: Importing OpenAI from langchain root module is no longer supported. Please use langchain.llms.OpenAI instead.
  warnings.warn(
Traceback (most recent call last):
  File "C:\Users\test\Desktop\baize-chatbot-main\demo\app.py", line 9, in <module>
    from app_modules.overwrites import *
  File "C:\Users\test\Desktop\baize-chatbot-main\demo\app_modules\overwrites.py", line 4, in <module>
    from llama_index import Prompt
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\__init__.py", line 18, in <module>
    from llama_index.indices.common.struct_store.base import SQLDocumentContextBuilder
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\__init__.py", line 4, in <module>
    from llama_index.indices.keyword_table.base import GPTKeywordTableIndex
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\keyword_table\__init__.py", line 4, in <module>
    from llama_index.indices.keyword_table.base import GPTKeywordTableIndex
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\keyword_table\base.py", line 18, in <module>
    from llama_index.indices.base import BaseGPTIndex
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\base.py", line 8, in <module>
    from llama_index.indices.base_retriever import BaseRetriever
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\base_retriever.py", line 5, in <module>
    from llama_index.indices.query.schema import QueryBundle, QueryType
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\query\__init__.py", line 3, in <module>
    from llama_index.indices.query.response_synthesis import ResponseSynthesizer
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\query\response_synthesis.py", line 5, in <module>
    from llama_index.indices.postprocessor.types import BaseNodePostprocessor
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\postprocessor\__init__.py", line 4, in <module>
    from llama_index.indices.postprocessor.node import (
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\postprocessor\node.py", line 13, in <module>
    from llama_index.indices.response import get_response_builder
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\response\__init__.py", line 3, in <module>
    from llama_index.indices.response.accumulate import Accumulate
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\response\accumulate.py", line 5, in <module>
    from llama_index.indices.response.base_builder import BaseResponseBuilder
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\response\base_builder.py", line 14, in <module>
    from llama_index.indices.service_context import ServiceContext
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\service_context.py", line 7, in <module>
    from llama_index.indices.prompt_helper import PromptHelper
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\indices\prompt_helper.py", line 12, in <module>
    from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\langchain_helpers\chain_wrapper.py", line 6, in <module>
    from llama_index.llm_predictor.base import (  # noqa: F401
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\llm_predictor\__init__.py", line 4, in <module>
    from llama_index.llm_predictor.base import LLMPredictor
  File "c:\users\test\anaconda3\lib\site-packages\llama_index\llm_predictor\base.py", line 11, in <module>
    from langchain import BaseCache, Cohere, LLMChain, OpenAI
ImportError: cannot import name 'BaseCache' from 'langchain' (c:\users\test\anaconda3\lib\site-packages\langchain\__init__.py)