Closed: 18811449050 closed this issue 3 weeks ago
Hello, @18811449050! I'm here to assist you with any bugs, questions, or contributions you may have. Let's squash those bugs together! 🐞
To resolve the `AttributeError: 'property' object has no attribute 'context_window'` error in your custom Ollama LLM code, you need to ensure that the `context_window` attribute is accessed correctly. The error means the attribute lookup is happening on a raw `property` object rather than on an instance, where the property would be resolved to its value. In the provided code, `context_window` is defined as an instance attribute using `Field`, and the `metadata` property accesses it directly, so you should ensure that `self.context_window` is correctly initialized and that `metadata` is evaluated on an instance.
Here's the corrected code snippet for the `metadata` property:

```python
@property
def metadata(self) -> LLMMetadata:
    """LLM metadata."""
    return LLMMetadata(
        context_window=self.context_window,
        num_output=DEFAULT_NUM_OUTPUTS,
        model_name=self.model,
        is_chat_model=True,  # Ollama supports chat API for all models
    )
```
This code correctly accesses `self.context_window` as an instance attribute, which should resolve the `AttributeError`.
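To make the failure mode concrete, here is a minimal, self-contained sketch (independent of LlamaIndex; the `Demo` class and its values are illustrative only) showing how this exact `AttributeError` arises when a `property` is read from the class itself instead of from an instance:

```python
from types import SimpleNamespace

class Demo:
    """Stand-in for an LLM class that exposes `metadata` as a property."""

    @property
    def metadata(self):
        # Illustrative metadata object carrying a context_window attribute.
        return SimpleNamespace(context_window=3900)

print(Demo().metadata.context_window)  # 3900 -- the property resolves on an instance
print(type(Demo.metadata))             # <class 'property'> -- raw descriptor on the class
# Demo.metadata.context_window would raise:
# AttributeError: 'property' object has no attribute 'context_window'
```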
Here is an example of how the `context_window` attribute is defined and used in the `Ollama` class from the LlamaIndex framework (the imports and the small `get_additional_kwargs` helper are reproduced here so the excerpt is self-contained):
````python
# Imports (abridged) and the helper below are reproduced from the
# LlamaIndex Ollama integration source so the excerpt is self-contained.
from typing import Any, Dict, Sequence, Tuple

import httpx
from httpx import Timeout
from llama_index.core.base.llms.types import (
    ChatMessage,
    ChatResponse,
    LLMMetadata,
    MessageRole,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS
from llama_index.core.llms.callbacks import llm_chat_callback
from llama_index.core.llms.custom import CustomLLM

DEFAULT_REQUEST_TIMEOUT = 30.0  # default used by the integration


def get_additional_kwargs(
    response: Dict[str, Any], exclude: Tuple[str, ...]
) -> Dict[str, Any]:
    """Return all response fields except the excluded keys."""
    return {k: v for k, v in response.items() if k not in exclude}


class Ollama(CustomLLM):
    """Ollama LLM.

    Visit https://ollama.com/ to download and install Ollama.

    Run `ollama serve` to start a server.

    Run `ollama pull <name>` to download a model to run.

    Examples:
        `pip install llama-index-llms-ollama`

        ```python
        from llama_index.llms.ollama import Ollama

        llm = Ollama(model="llama2", request_timeout=60.0)
        response = llm.complete("What is the capital of France?")
        print(response)
        ```
    """

    base_url: str = Field(
        default="http://localhost:11434",
        description="Base url the model is hosted under.",
    )
    model: str = Field(description="The Ollama model to use.")
    temperature: float = Field(
        default=0.75,
        description="The temperature to use for sampling.",
        gte=0.0,
        lte=1.0,
    )
    context_window: int = Field(
        default=DEFAULT_CONTEXT_WINDOW,
        description="The maximum number of context tokens for the model.",
        gt=0,
    )
    request_timeout: float = Field(
        default=DEFAULT_REQUEST_TIMEOUT,
        description="The timeout for making http request to Ollama API server",
    )
    prompt_key: str = Field(
        default="prompt", description="The key to use for the prompt in API calls."
    )
    json_mode: bool = Field(
        default=False,
        description="Whether to use JSON mode for the Ollama API.",
    )
    additional_kwargs: Dict[str, Any] = Field(
        default_factory=dict,
        description="Additional model parameters for the Ollama API.",
    )

    @classmethod
    def class_name(cls) -> str:
        return "Ollama_llm"

    @property
    def metadata(self) -> LLMMetadata:
        """LLM metadata."""
        return LLMMetadata(
            context_window=self.context_window,
            num_output=DEFAULT_NUM_OUTPUTS,
            model_name=self.model,
            is_chat_model=True,  # Ollama supports chat API for all models
        )

    @property
    def _model_kwargs(self) -> Dict[str, Any]:
        base_kwargs = {
            "temperature": self.temperature,
            "num_ctx": self.context_window,
        }
        return {
            **base_kwargs,
            **self.additional_kwargs,
        }

    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        # Build the request payload for Ollama's /api/chat endpoint.
        payload = {
            "model": self.model,
            "messages": [
                {
                    "role": message.role.value,
                    "content": message.content,
                    **message.additional_kwargs,
                }
                for message in messages
            ],
            "options": self._model_kwargs,
            "stream": False,
            **kwargs,
        }

        if self.json_mode:
            payload["format"] = "json"

        with httpx.Client(timeout=Timeout(self.request_timeout)) as client:
            response = client.post(
                url=f"{self.base_url}/api/chat",
                json=payload,
            )
            response.raise_for_status()
            raw = response.json()
            message = raw["message"]
            return ChatResponse(
                message=ChatMessage(
                    content=message.get("content"),
                    role=MessageRole(message.get("role")),
                    additional_kwargs=get_additional_kwargs(
                        message, ("content", "role")
                    ),
                ),
                raw=raw,
                additional_kwargs=get_additional_kwargs(raw, ("message",)),
            )
````
This example demonstrates how to define and use the `context_window` attribute correctly, ensuring that it is accessed as an instance attribute <sup>[[1]](https://github.com/run-llama/llama_index/blob/main/llama-index-legacy/llama_index/legacy/llms/ollama.py)</sup><sup>[[2]](https://github.com/run-llama/llama_index/blob/main/llama-index-integrations/llms/llama-index-llms-perplexity/llama_index/llms/perplexity/base.py)</sup><sup>[[3]](https://github.com/run-llama/llama_index/blob/main/llama-index-legacy/llama_index/legacy/llms/perplexity.py)</sup><sup>[[4]](https://github.com/run-llama/llama_index/blob/main/llama-index-legacy/llama_index/legacy/llms/xinference.py)</sup><sup>[[5]](https://github.com/run-llama/llama_index/blob/main/llama-index-integrations/llms/llama-index-llms-ollama/llama_index/llms/ollama/base.py)</sup>.
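As a quick sanity check, the following hedged sketch (assuming the `Ollama` class above is importable) constructs an instance and reads `context_window` through the `metadata` property; note that the property only resolves on an instance, not on the class:

```python
llm = Ollama(model="llama2", request_timeout=120.0, context_window=3900)
print(llm.metadata.context_window)  # 3900 -- resolved through the instance

# Reading the property from the class itself reproduces the reported error:
# Ollama.metadata.context_window
# AttributeError: 'property' object has no attribute 'context_window'
```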
*To continue talking to [Dosu](https://dosu.dev), mention @dosu.*
Question Validation
Question
I have a custom Ollama LLM running locally, but I'm facing the above issue when I query.
```python
class LiLlm(Ollama):
    @staticmethod
    def get_resp(model_name, text, stream=False):
        llm = Ollama(
            model=model_name,
            request_timeout=120.0,
            context_window=3900,
            json_mode=True,

Settings.llm = LiLlm
Settings.embed_model = HuggingFaceEmbeddingModel(
    model_name="/home/lanyun/model/instructor-large-model"
)
```
Load Documents
Parse into Nodes
```python
documents = SimpleDirectoryReader("./data/data1").load_data()
nodes = SentenceSplitter().get_nodes_from_documents(documents)
```
Add to Docstore
```python
docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
```
Define Multiple Indexes
Each index uses the same underlying Node.
```python
storage_context = StorageContext.from_defaults(docstore=docstore)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
    nodes, storage_context=storage_context
)
```
NOTE: the docstore still has the same nodes
```python
print(storage_context.docstore.docs)
print(storage_context.docstore.docs.keys())
print(len(storage_context.docstore.docs))

query_engine = summary_index.as_query_engine()
response = query_engine.query("What is a summary of this document?")
```
```
Traceback (most recent call last):
  File "/home/lanyun/mgl/llama_index_security_project/li_storge.py", line 40, in <module>
    query_engine = summary_index.as_query_engine()
  File "/home/lanyun/miniconda3/envs/llama_index_env1/lib/python3.9/site-packages/llama_index/core/indices/base.py", line 411, in as_query_engine
    return RetrieverQueryEngine.from_args(
  File "/home/lanyun/miniconda3/envs/llama_index_env1/lib/python3.9/site-packages/llama_index/core/query_engine/retriever_query_engine.py", line 110, in from_args
    response_synthesizer = response_synthesizer or get_response_synthesizer(
  File "/home/lanyun/miniconda3/envs/llama_index_env1/lib/python3.9/site-packages/llama_index/core/response_synthesizers/factory.py", line 72, in get_response_synthesizer
    or PromptHelper.from_llm_metadata(
  File "/home/lanyun/miniconda3/envs/llama_index_env1/lib/python3.9/site-packages/llama_index/core/indices/prompt_helper.py", line 117, in from_llm_metadata
    context_window = llm_metadata.context_window
AttributeError: 'property' object has no attribute 'context_window'
```