Question Validation
[X] I have searched both the documentation and discord for an answer.
Question
My code is as follows:
from llama_index.core import KnowledgeGraphIndex, SimpleDirectoryReader, Settings
from llama_index.graph_stores.neo4j import Neo4jGraphStore
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import PromptTemplate
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import StorageContext, load_index_from_storage
import os
import torch
import json
import re

llm = HuggingFaceLLM(
    tokenizer_name='/home/cdhd/Desktop/glm4/glm-4-9b-chat',
    model_name='/home/cdhd/Desktop/glm4/glm-4-9b-chat',
)
SYSTEM_PROMPT = """你是一个知识图谱查询专家,请将查询到的内容整理成完整的一句话返回,不添加其他的内容和自己的理解. """
query_wrapper_prompt = PromptTemplate(
    "[INST]<<SYS>>\n" + SYSTEM_PROMPT + "<</SYS>>\n\n{query_str}[/INST] "
)
llm = HuggingFaceLLM(
    context_window=4096,
    max_new_tokens=1024,
    generate_kwargs={"temperature": 0.1, "do_sample": True},
    query_wrapper_prompt=query_wrapper_prompt,
    tokenizer_name='/home/cdhd/PythonCode/pythonProject/model/gpt2',
)
embed = HuggingFaceEmbedding(
    model_name='/home/huidao/SDHD_AI_project/flask_project/KnowledgeBase/models/bge-large-zh-v1.5/models--BAAI--bge-large-zh-v1.5/snapshots/79e7739b6ab944e86d6171e44d24c997fc1e0116',
)

def graph_ground_process(documents, question, flag):
    # Directory where the persisted index is expected
    path = './ground'
    # Initialize the Neo4j graph store
    graph_store = Neo4jGraphStore(
        username="neo4j",
        password="12345678",
        url="bolt://localhost:7687",
        database="ground",
    )
    # Create the storage context
    storage_context = StorageContext.from_defaults(graph_store=graph_store)
    Settings.llm = llm
    Settings.embed_model = embed
    # node_parser = SentenceSplitter()
    # nodes = node_parser.get_nodes_from_documents(documents)
    if os.path.exists(path) and os.path.isdir(path) and flag:
        print("index exists")
        index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=path)
        )
        retriever = index.as_retriever(retriever_mode="hybrid", embed_model=embed)
        ans = retriever.retrieve(question)
        print("1", ans)
        for node in ans:
            text = node.metadata['kg_rel_texts']
            print(text)
        query_engine = index.as_query_engine(
            include_text=True, response_mode="tree_summarize", embedding_mode="hybrid",
        )
        response = query_engine.query(question)
        response_str = str(response)
        print("hh", response_str)
        result = re.split(r"\[/INST\]|<</INST>>", response_str)
        result = result[0]
        print("result:", result)
    else:
        print("index does not exist")
        # Clear out the existing graph data
        graph_store.query(
            """
            MATCH (n) DETACH DELETE n
            """
        )
        # Build the index from the documents
        index = KnowledgeGraphIndex.from_documents(
            documents,
            storage_context=storage_context,
            max_triplets_per_chunk=None,
            include_embeddings=True,
        )
        # Load extra triplets from a prepared JSON file and upsert them
        json_filename = '/home/huidao/KnowledgeProject/pythonProject/GroundSystem/Data_Json/data.json'
        with open(json_filename, 'r', encoding='utf-8') as file:
            data = json.load(file)
        tups = [(item['first_entity'], item['relation'], item['two_entity']) for item in data]
        for tup in tups:
            # index.upsert_triplet_and_node(tup, nodes[0])
            index.upsert_triplet(tup, include_embeddings=True)
        retriever = index.as_retriever(retriever_mode="hybrid", embed_model=embed, similarity_top_k=10)
        print("1111")
        ans = retriever.retrieve(question)
        print("1", ans)
        for node in ans:
            text = node.metadata['kg_rel_texts']
            print(text)
        query_engine = index.as_query_engine(
            include_text=True, response_mode="tree_summarize", embedding_mode="hybrid",
            similarity_top_k=10,
        )
        response = query_engine.query(question)
        response_str = str(response)
        print("hh", response_str)
        result = re.split(r"\[/INST\]|<</INST>>", response_str)
        result = result[0]
        print("result:", result)
        # Persist the index for subsequent runs
        index.storage_context.persist(persist_dir=path)
    return result
# Check whether this script is being run directly
if __name__ == "__main__":
    documents = SimpleDirectoryReader(
        '/home/huidao/KnowledgeProject/pythonProject/地面流程'
    ).load_data()
    question = "节流阀的分类有哪几种,分别具有哪些特点?"
    graph_ground_process(documents, question, False)
Error message:
Traceback (most recent call last):
  File "/home/huidao/KnowledgeProject/pythonProject/GroundSystem/ground_demo.py", line 160, in <module>
    graph_ground_process(documents, question, False)
  File "/home/huidao/KnowledgeProject/pythonProject/GroundSystem/ground_demo.py", line 105, in graph_ground_process
    index = KnowledgeGraphIndex.from_documents(
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/indices/base.py", line 145, in from_documents
    return cls(
           ^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/indices/knowledge_graph/base.py", line 109, in __init__
    super().__init__(
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/indices/base.py", line 94, in __init__
    index_struct = self.build_index_from_nodes(
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/indices/base.py", line 216, in build_index_from_nodes
    return self._build_index_from_nodes(nodes, **build_kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/indices/knowledge_graph/base.py", line 215, in _build_index_from_nodes
    triplets = self._extract_triplets(
               ^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/indices/knowledge_graph/base.py", line 159, in _extract_triplets
    return self._llm_extract_triplets(text)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/indices/knowledge_graph/base.py", line 163, in _llm_extract_triplets
    response = self._llm.predict(
               ^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/instrumentation/dispatcher.py", line 230, in wrapper
    result = func(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/llms/llm.py", line 438, in predict
    response = self.complete(formatted_prompt, formatted=True)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/instrumentation/dispatcher.py", line 230, in wrapper
    result = func(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/core/llms/callbacks.py", line 429, in wrapped_llm_predict
    f_return_val = f(_self, *args, **kwargs)
                   ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/llama_index/llms/huggingface/base.py", line 360, in complete
    tokens = self._model.generate(
             ^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/transformers/generation/utils.py", line 1914, in generate
    result = self._sample(
             ^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/transformers/generation/utils.py", line 2651, in _sample
    outputs = self(
              ^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/.cache/huggingface/modules/transformers_modules/glm-4-9b-chat/modeling_chatglm.py", line 878, in forward
    transformer_outputs = self.transformer(
                          ^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/.cache/huggingface/modules/transformers_modules/glm-4-9b-chat/modeling_chatglm.py", line 774, in forward
    hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
                                                                      ^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/.cache/huggingface/modules/transformers_modules/glm-4-9b-chat/modeling_chatglm.py", line 607, in forward
    layer_ret = layer(
                ^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/.cache/huggingface/modules/transformers_modules/glm-4-9b-chat/modeling_chatglm.py", line 510, in forward
    attention_output, kv_cache = self.self_attention(
                                 ^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/KnowledgeProject/pythonProject/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/huidao/.cache/huggingface/modules/transformers_modules/glm-4-9b-chat/modeling_chatglm.py", line 376, in forward
    cache_k, cache_v = kv_cache
    ^^^^^^^^^^^^^^^^
ValueError: too many values to unpack (expected 2)
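From the stack, the exception is raised inside glm-4-9b-chat's own modeling_chatglm.py (line 376, cache_k, cache_v = kv_cache) during model.generate, after LlamaIndex has already handed off to HuggingFace. Below is a minimal sketch to isolate the failure outside LlamaIndex; it assumes the checkpoint loads via AutoModelForCausalLM/AutoTokenizer with trust_remote_code=True, and reuses the local checkpoint path from my setup above:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Local glm-4-9b-chat checkpoint (same path as in the HuggingFaceLLM setup above)
model_path = '/home/cdhd/Desktop/glm4/glm-4-9b-chat'

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    trust_remote_code=True,  # glm-4-9b-chat ships custom modeling_chatglm.py
    device_map="auto",
)

inputs = tokenizer("节流阀的分类有哪几种?", return_tensors="pt").to(model.device)
with torch.no_grad():
    # The traceback above fails inside this same generate() call; if this
    # sketch also raises "too many values to unpack", the problem sits
    # between the installed transformers version and the model's custom
    # code rather than in LlamaIndex.
    output = model.generate(**inputs, max_new_tokens=64, do_sample=True, temperature=0.1)
print(tokenizer.decode(output[0], skip_special_tokens=True))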
How exactly should I modify my code to fix this?