openvino-dev-samples / chatglm3.openvino

This sample shows how to deploy ChatGLM3 using OpenVINO

rag_chain.invoke has an error: "TypeError: 'NoneType' object is not callable" #7

Open doubtfire009 opened 5 months ago

doubtfire009 commented 5 months ago

My code is here:

```python
import argparse
from typing import List, Tuple
from threading import Event, Thread

import torch
from optimum.intel.openvino import OVModelForCausalLM
from transformers import (AutoTokenizer, AutoConfig, pipeline, TextIteratorStreamer,
                          StoppingCriteriaList, StoppingCriteria)
from langchain_community.vectorstores import FAISS
from langchain.prompts.prompt import PromptTemplate
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA


def create_and_load_faiss_index(read_local=None, path=None, document_list=[]):
    global db
    if read_local is True:
        # Load the local index (读本地数据)
        # db = FAISS.load_local(path, embeddings)
        db = FAISS.load_local(path, embeddings, allow_dangerous_deserialization=True)
    else:
        print("Building the vector database...")  # 构建向量数据库中...
        db = FAISS.from_documents(document_list, embeddings)
        db.save_local(path)
    return db

class StopOnTokens(StoppingCriteria):
    def __init__(self, token_ids):
        self.token_ids = token_ids

    def __call__(
            self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs
    ) -> bool:
        for stop_id in self.token_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False

if __name__ == "__main__":
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-h', '--help', action='help',
                        help='Show this help message and exit.')
    parser.add_argument('-m', '--model_path', required=True, type=str,
                        help='Required. model path')
    parser.add_argument('-l', '--max_sequence_length', default=256, required=False, type=int,
                        help='Required. maximum length of output')
    parser.add_argument('-d', '--device', default='CPU', required=False, type=str,
                        help='Required. device for inference')
    args = parser.parse_args()
    model_dir = args.model_path

    ov_config = {"PERFORMANCE_HINT": "LATENCY",
                 "NUM_STREAMS": "1", "CACHE_DIR": ""}

    tokenizer = AutoTokenizer.from_pretrained(
        model_dir, trust_remote_code=True)

    print("====Compiling model====")
    ov_model = OVModelForCausalLM.from_pretrained(
        model_dir,
        device=args.device,
        ov_config=ov_config,
        config=AutoConfig.from_pretrained(model_dir, trust_remote_code=True),
        trust_remote_code=True,
    )
    # TextIteratorStreamer ???
    streamer = TextIteratorStreamer(
        tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True
    )
    stop_tokens = [0, 2]
    print('StopOnTokens')

    stop_tokens = [StopOnTokens(stop_tokens)]

    embeddingsModelPath: str = 'D:/AI_projects/Langchain-Chatchat/llm_model/Embedding/bge-large-zh-v1.5'
    embeddings = HuggingFaceEmbeddings(
        model_name=embeddingsModelPath,  # Provide the pre-trained model's path
        model_kwargs={"trust_remote_code": True, 'device': 'cpu'},  # Pass the model configuration options
        encode_kwargs={'normalize_embeddings': False}  # Pass the encoding options
    )
    db = create_and_load_faiss_index(read_local=True, path="pkl_chatglm3", document_list=[])

    history = []
    print("====Starting conversation====")

    input_text = '发什么快递'  # "Which courier do you ship with?"

    print("ChatGLM3-6B-OpenVINO:", end=" ")
    # history = history + [[parse_text(input_text), ""]]
    # model_inputs = convert_history_to_token(history)

    docs_and_scores = db.similarity_search_with_score(input_text)
    print('docs_and_scores')
    print(input_text)
    print(docs_and_scores)

    retriever = db.as_retriever(search_kwargs={"k": 3})

    # StoppingCriteriaList ???
    generate_kwargs = dict(
        # input_ids=model_inputs,
        model=ov_model,
        max_new_tokens=args.max_sequence_length,
        temperature=0.1,
        do_sample=True,
        top_p=1.0,
        top_k=50,
        repetition_penalty=1.1,
        streamer=streamer,
        stopping_criteria=StoppingCriteriaList(stop_tokens)
    )

    pipe = pipeline("text-generation", **generate_kwargs)
    llm = HuggingFacePipeline(pipeline=pipe)

    # prompt = PromptTemplate.from_template(llm_model_configuration["rag_prompt_template"])
    # Prompt (Chinese): "Answer the question in as much detail as possible using the
    # provided context. If the answer is not in the context, say 'the answer is not
    # available in the context'; do not give a wrong answer."
    prompt_template = """
            尽可能详细地回答问题,从提供的上下文中提供所有细节。如果答案不在提供的上下文中,请说“答案在上下文中不可用”,不要提供错误的答案。\n\n
            上下文:\n {context}?\n
            问题: \n{question}\n

            答案:"""
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain_type_kwargs = {"prompt": prompt}
    rag_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=retriever,
        verbose=True
        # chain_type_kwargs=chain_type_kwargs,
    )

    print('question')
    print(input_text)
    rag_chain.invoke(input_text)
    # stream_complete.set()
```

But I encounter the following error:

```
> Entering new RetrievalQA chain...
Traceback (most recent call last):
  File "D:\AI_projects\chatglm3.openvino\chat_from_doc_new.py", line 209, in <module>
    rag_chain.invoke(input_text)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke
    raise e
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke
    self._call(inputs, run_manager=run_manager)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\retrieval_qa\base.py", line 144, in _call
    answer = self.combine_documents_chain.run(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 574, in run
    return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 378, in __call__
    return self.invoke(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke
    raise e
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke
    self._call(inputs, run_manager=run_manager)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\combine_documents\base.py", line 137, in _call
    output, extra_return_dict = self.combine_docs(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\combine_documents\stuff.py", line 244, in combine_docs
    return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 293, in predict
    return self(kwargs, callbacks=callbacks)[self.output_key]
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 378, in __call__
    return self.invoke(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke
    raise e
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke
    self._call(inputs, run_manager=run_manager)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 103, in _call
    response = self.generate([inputs], run_manager=run_manager)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 115, in generate
    return self.llm.generate_prompt(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 597, in generate_prompt
    return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 767, in generate
    output = self._generate_helper(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 634, in _generate_helper
    raise e
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 621, in _generate_helper
    self._generate(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_community\llms\huggingface_pipeline.py", line 267, in _generate
    responses = self.pipeline(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\text_generation.py", line 240, in __call__
    return super().__call__(text_inputs, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\base.py", line 1187, in __call__
    outputs = list(final_iterator)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 124, in __next__
    item = next(self.iterator)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 124, in __next__
    item = next(self.iterator)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\dataloader.py", line 631, in __next__
    data = self._next_data()
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\dataloader.py", line 675, in _next_data
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\_utils\fetch.py", line 51, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\_utils\fetch.py", line 51, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 19, in __getitem__
    processed = self.process(item, **self.params)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\text_generation.py", line 264, in preprocess
    inputs = self.tokenizer(
TypeError: 'NoneType' object is not callable
```

Can you help with this? Thanks!

openvino-dev-samples commented 5 months ago

Hi @doubtfire009, can you try `rag_chain.invoke({"question": input_text})`?

BTW, we have updated the RAG example: https://github.com/openvinotoolkit/openvino_notebooks/tree/latest/notebooks/llm-rag-langchain
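
(Editor's note: as far as I can tell, `RetrievalQA` exposes `"query"` as its default input key and maps it onto the prompt's `{question}` variable internally, so a minimal invocation of the chain as defined above would be:)

```python
# RetrievalQA's default input key is "query", not "question";
# the chain itself fills the prompt's {question} slot from it.
rag_chain.invoke({"query": input_text})
```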

doubtfire009 commented 5 months ago

I use rag_chain.invoke({"question": input_text}) but get the bug:

```
> Entering new RetrievalQA chain...
Traceback (most recent call last):
  File "D:\AI_projects\chatglm3.openvino\chat_from_doc_new.py", line 156, in <module>
    rag_chain.invoke({"question": input_text})
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke
    raise e
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 151, in invoke
    self._validate_inputs(inputs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 279, in _validate_inputs
    raise ValueError(f"Missing some input keys: {missing_keys}")
ValueError: Missing some input keys: {'query'}
```

I use rag_chain.invoke({"query": input_text}) but have the error:

```
> Entering new RetrievalQA chain...
Traceback (most recent call last):
  File "D:\AI_projects\chatglm3.openvino\chat_from_doc_new.py", line 155, in <module>
    rag_chain.invoke({"query": input_text})
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke
    raise e
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke
    self._call(inputs, run_manager=run_manager)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\retrieval_qa\base.py", line 144, in _call
    answer = self.combine_documents_chain.run(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 574, in run
    return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 378, in __call__
    return self.invoke(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke
    raise e
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke
    self._call(inputs, run_manager=run_manager)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\combine_documents\base.py", line 137, in _call
    output, extra_return_dict = self.combine_docs(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\combine_documents\stuff.py", line 244, in combine_docs
    return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 293, in predict
    return self(kwargs, callbacks=callbacks)[self.output_key]
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\_api\deprecation.py", line 145, in warning_emitting_wrapper
    return wrapped(*args, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 378, in __call__
    return self.invoke(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 163, in invoke
    raise e
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\base.py", line 153, in invoke
    self._call(inputs, run_manager=run_manager)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 103, in _call
    response = self.generate([inputs], run_manager=run_manager)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain\chains\llm.py", line 115, in generate
    return self.llm.generate_prompt(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 597, in generate_prompt
    return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 767, in generate
    output = self._generate_helper(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 634, in _generate_helper
    raise e
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_core\language_models\llms.py", line 621, in _generate_helper
    self._generate(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\langchain_community\llms\huggingface_pipeline.py", line 267, in _generate
    responses = self.pipeline(
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\text_generation.py", line 240, in __call__
    return super().__call__(text_inputs, **kwargs)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\base.py", line 1187, in __call__
    outputs = list(final_iterator)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 124, in __next__
    item = next(self.iterator)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 124, in __next__
    item = next(self.iterator)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\dataloader.py", line 631, in __next__
    data = self._next_data()
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\dataloader.py", line 675, in _next_data
    data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\_utils\fetch.py", line 51, in fetch
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\torch\utils\data\_utils\fetch.py", line 51, in <listcomp>
    data = [self.dataset[idx] for idx in possibly_batched_index]
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\pt_utils.py", line 19, in __getitem__
    processed = self.process(item, **self.params)
  File "C:\ProgramData\anaconda3\envs\llm_310_onv\lib\site-packages\transformers\pipelines\text_generation.py", line 264, in preprocess
    inputs = self.tokenizer(
TypeError: 'NoneType' object is not callable
```

The issue is still not fixed. Can you help with this?

openvino-dev-samples commented 5 months ago

Have you tried the original HuggingFace pipeline, i.e. `model = AutoModelForCausalLM.from_pretrained(...)`?
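
(Editor's note: a minimal sketch of that cross-check, assuming the same `model_dir` as in the script above. Swap `OVModelForCausalLM` for the plain transformers model and keep everything else unchanged:)

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Cross-check with the vanilla HuggingFace model: if the same
# "'NoneType' object is not callable" error appears here too, the
# problem is in how the pipeline/chain is wired, not in OpenVINO.
model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer,
                max_new_tokens=256)
print(pipe("hello")[0]["generated_text"])
```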

I believe this issue is not caused by OpenVINO but by LangChain.
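
(Editor's note: the traceback ends in transformers' `text_generation.py` `preprocess` with `self.tokenizer` being `None`. That typically happens when `pipeline(...)` is given a model *object*, here the `OVModelForCausalLM`, but no `tokenizer=` argument: transformers cannot infer a tokenizer from an in-memory model, so it stays `None` and the first tokenization call fails. A minimal sketch of the likely fix, reusing the variables defined in the script above:)

```python
from transformers import pipeline, StoppingCriteriaList

# Same pipeline as in the original script, plus an explicit tokenizer.
# `ov_model`, `tokenizer`, `streamer`, `stop_tokens`, and `args` are the
# objects defined earlier in the script; without `tokenizer=`, the
# pipeline's tokenizer stays None and `self.tokenizer(prompt)` raises
# "'NoneType' object is not callable".
pipe = pipeline(
    "text-generation",
    model=ov_model,
    tokenizer=tokenizer,  # the argument missing from the original script
    max_new_tokens=args.max_sequence_length,
    temperature=0.1,
    do_sample=True,
    top_p=1.0,
    top_k=50,
    repetition_penalty=1.1,
    streamer=streamer,
    stopping_criteria=StoppingCriteriaList(stop_tokens),
)
```

With the tokenizer passed in, `rag_chain.invoke({"query": input_text})` should at least get past the `preprocess` step.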