Open doubtfire009 opened 5 months ago
Hi @doubtfire009, can you try `rag_chain.invoke({"question": input_text})`?
BTW we have updated RAG example : https://github.com/openvinotoolkit/openvino_notebooks/tree/latest/notebooks/llm-rag-langchain
I used rag_chain.invoke({"question": input_text}) but got this error:
Entering new RetrievalQA chain... Traceback (most recent call last): File "D:\AI_projects\chatglm3.openvino\chat_from_doc_new.py", line 156, in <module>
rag_chain.invoke({"question": input_text}) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke raise e File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 151, in invoke self._validate_inputs(inputs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 279, in _validate_inputs raise ValueError(f"Missing some input keys: {missing_keys}") ValueError: Missing some input keys: {'query'}
I used rag_chain.invoke({"query": input_text}) but got this error:
Entering new RetrievalQA chain... Traceback (most recent call last): File "D:\AI_projects\chatglm3.openvino\chat_from_doc_new.py", line 155, in <module>
rag_chain.invoke({"query": input_text}) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke raise e File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke self._call(inputs, run_manager=run_manager) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\retrieval_qa\base.py", line 144, in _call answer = self.combine_documents_chain.run( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 574, in run return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[ File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 378, in __call__ return self.invoke( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke raise e File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke self._call(inputs, run_manager=run_manager) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\combine_documents\base.py", line 137, in _call output, extra_return_dict = self.combine_docs( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\combine_documents\stuff.py", line 244, in combine_docs return self.llm_chain.predict(callbacks=callbacks, **inputs), {} File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 293, in predict return self(kwargs, callbacks=callbacks)[self.output_key] File 
"C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 378, in __call__ return self.invoke( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke raise e File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke self._call(inputs, run_manager=run_manager) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 103, in _call response = self.generate([inputs], run_manager=run_manager) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 115, in generate return self.llm.generate_prompt( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 597, in generate_prompt return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 767, in generate output = self._generate_helper( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 634, in _generate_helper raise e File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 621, in _generate_helper self._generate( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_community\llms\huggingface_pipeline.py", line 267, in _generate responses = self.pipeline( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\text_generation.py", line 240, in __call__ return super().__call__(text_inputs, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\base.py", 
line 1187, in __call__ outputs = list(final_iterator) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 124, in __next__ item = next(self.iterator) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 124, in __next__ item = next(self.iterator) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\dataloader.py", line 631, in __next__ data = self._next_data() File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\dataloader.py", line 675, in _next_data data = self._dataset_fetcher.fetch(index) # may raise StopIteration File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\_utils\fetch.py", line 51, in fetch data = [self.dataset[idx] for idx in possibly_batched_index] File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\_utils\fetch.py", line 51, in <listcomp> data = [self.dataset[idx] for idx in possibly_batched_index] File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 19, in __getitem__ processed = self.process(item, **self.params) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\text_generation.py", line 264, in preprocess inputs = self.tokenizer( TypeError: 'NoneType' object is not callable
The issue is still not fixed. Can you help with this?
Have you tried the original HuggingFace pipeline?
model = AutoModelForCausalLM.from_pretrained
I believe this issue is not caused by OpenVINO, but LangChain.
My code is here: `import argparse from typing import List, Tuple from threading import Event, Thread import torch from optimum.intel.openvino import OVModelForCausalLM from transformers import (AutoTokenizer, AutoConfig, pipeline, TextIteratorStreamer, StoppingCriteriaList, StoppingCriteria) from langchain_community.vectorstores import FAISS from langchain.prompts.prompt import PromptTemplate from langchain.embeddings.huggingface import HuggingFaceEmbeddings from langchain.llms import HuggingFacePipeline from langchain.chains import RetrievalQA
def create_and_load_faiss_index(read_local=None, path=None, document_list=[]): global db if read_local is True:
读本地数据 (read local data)
class StopOnTokens(StoppingCriteria): def init(self, token_ids): self.token_ids = token_ids
if name == "main": parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-h', '--help', action='help', help='Show this help message and exit.') parser.add_argument('-m', '--model_path', required=True, type=str, help='Required. model path') parser.add_argument('-l', '--max_sequence_length', default=256, required=False, type=int, help='Required. maximun length of output') parser.add_argument('-d', '--device', default='CPU', required=False, type=str, help='Required. device for inference') args = parser.parse_args() model_dir = args.model_path
` But I encountered the following error:
> Entering new RetrievalQA chain... Traceback (most recent call last): File "D:\AI_projects\chatglm3.openvino\chat_from_doc_new.py", line 209, in <module> rag_chain.invoke(input_text) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke raise e File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke self._call(inputs, run_manager=run_manager) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\retrieval_qa\base.py", line 144, in _call answer = self.combine_documents_chain.run( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 574, in run return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[ File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 378, in __call__ return self.invoke( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke raise e File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke self._call(inputs, run_manager=run_manager) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\combine_documents\base.py", line 137, in _call output, extra_return_dict = self.combine_docs( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\combine_documents\stuff.py", line 244, in combine_docs return self.llm_chain.predict(callbacks=callbacks, **inputs), {} File 
"C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 293, in predict return self(kwargs, callbacks=callbacks)[self.output_key] File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 378, in __call__ return self.invoke( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke raise e File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke self._call(inputs, run_manager=run_manager) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 103, in _call response = self.generate([inputs], run_manager=run_manager) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 115, in generate return self.llm.generate_prompt( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 597, in generate_prompt return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 767, in generate output = self._generate_helper( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 634, in _generate_helper raise e File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 621, in _generate_helper self._generate( File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_community\llms\huggingface_pipeline.py", line 267, in _generate responses = self.pipeline( File 
"C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\text_generation.py", line 240, in __call__ return super().__call__(text_inputs, **kwargs) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\base.py", line 1187, in __call__ outputs = list(final_iterator) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 124, in __next__ item = next(self.iterator) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 124, in __next__ item = next(self.iterator) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\dataloader.py", line 631, in __next__ data = self._next_data() File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\dataloader.py", line 675, in _next_data data = self._dataset_fetcher.fetch(index) # may raise StopIteration File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\_utils\fetch.py", line 51, in fetch data = [self.dataset[idx] for idx in possibly_batched_index] File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\_utils\fetch.py", line 51, in <listcomp> data = [self.dataset[idx] for idx in possibly_batched_index] File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 19, in __getitem__ processed = self.process(item, **self.params) File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\text_generation.py", line 264, in preprocess inputs = self.tokenizer( TypeError: 'NoneType' object is not callable
Can you help with this? Thanks!