neuml / txtai

💡 All-in-one open-source embeddings database for semantic search, LLM orchestration and language model workflows
https://neuml.github.io/txtai
Apache License 2.0
9.38k stars 603 forks source link

json Response Error when trying to use ollama embeddings in txtai #738

Closed SiddhardhaSaran closed 4 months ago

SiddhardhaSaran commented 4 months ago

I am not sure if i am doing it correct but looking at the various examples i was able to get the following code to try and get ollama embeddings into txtai but i keep getting an error. Here is a minimal reproducible code. I ensured that Ollama server is running. Platform is Macos, also doesnt work on Windows with same error, txtai version 7.2.0.

import ollama
import numpy as np
from txtai import Embeddings

def transform(text):
    n_array = ollama.embeddings(model="all-minilm", prompt=text)
    return np.array(n_array["embedding"])

def test_stream(dataset):
    for i, row in enumerate(dataset):
        yield i,transform(row)

data = [
  "US tops 5 million confirmed virus cases",
  "Canada's last fully intact ice shelf has suddenly collapsed, " +
  "forming a Manhattan-sized iceberg",
  "Beijing mobilises invasion craft along coast as Taiwan tensions escalate",
  "The National Park Service warns against sacrificing slower friends " +
  "in a bear attack",
  "Maine man wins $1M from $25 lottery ticket",
  "Make huge profits without work, earn up to $100,000 a day"
]

embeddings = Embeddings({"transform": transform, "backend": "numpy", "content": True})
embeddings.index(data)
embeddings.search("feel good story", 1)
Error { "name": "ResponseError", "message": "json: cannot unmarshal array into Go struct field EmbeddingRequest.prompt of type string", "stack": "--------------------------------------------------------------------------- ResponseError Traceback (most recent call last) Cell In[5], line 13 1 data = [ 2 \"US tops 5 million confirmed virus cases\", 3 \"Canada's last fully intact ice shelf has suddenly collapsed, \" + (...) 9 \"Make huge profits without work, earn up to $100,000 a day\" 10 ] 12 embeddings = Embeddings({\"transform\": transform, \"backend\": \"numpy\", \"content\": True}) ---> 13 embeddings.index(data) 14 embeddings.search(\"feel good story\", 1) File ~/Documents/LLM_test/.venv/lib/python3.12/site-packages/txtai/embeddings/base.py:120, in Embeddings.index(self, documents, reindex) 116 stream = Stream(self, Action.REINDEX if reindex else Action.INDEX) 118 with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".npy\") as buffer: 119 # Load documents into database and transform to vectors --> 120 ids, dimensions, embeddings = transform(stream(documents), buffer) 121 if embeddings is not None: 122 # Build LSA model (if enabled). Remove principal components from embeddings. 123 if self.config.get(\"pca\"): File ~/Documents/LLM_test/.venv/lib/python3.12/site-packages/txtai/embeddings/index/transform.py:76, in Transform.__call__(self, documents, buffer) 73 ids, dimensions, embeddings = None, None, None 75 if self.model: ---> 76 ids, dimensions, embeddings = self.vectors(documents, buffer) 77 else: 78 ids = self.ids(documents) File ~/Documents/LLM_test/.venv/lib/python3.12/site-packages/txtai/embeddings/index/transform.py:95, in Transform.vectors(self, documents, buffer) 83 \"\"\" 84 Runs a vectors transform operation when dense indexing is enabled. 85 (...) 91 (document ids, dimensions, embeddings) 92 \"\"\" 94 # Consume stream and transform documents to vectors ---> 95 ids, dimensions, batches, stream = self.model.index(self.stream(documents), self.batch) 97 # Check that embeddings are available and load as a memmap 98 embeddings = None File ~/Documents/LLM_test/.venv/lib/python3.12/site-packages/txtai/vectors/base.py:136, in Vectors.index(self, documents, batchsize) 134 # Final batch 135 if batch: --> 136 uids, dimensions = self.batch(batch, output) 137 ids.extend(uids) 138 batches += 1 File ~/Documents/LLM_test/.venv/lib/python3.12/site-packages/txtai/vectors/base.py:195, in Vectors.batch(self, documents, output) 192 dimensions = None 194 # Build embeddings --> 195 embeddings = self.vectorize(documents) 196 if embeddings is not None: 197 dimensions = embeddings.shape[1] File ~/Documents/LLM_test/.venv/lib/python3.12/site-packages/txtai/vectors/base.py:241, in Vectors.vectorize(self, data) 225 \"\"\" 226 Runs data vectorization, which consists of the following steps. 227 (...) 237 embeddings vectors 238 \"\"\" 240 # Transform data into vectors --> 241 embeddings = self.encode(data) 243 if embeddings is not None: 244 # Truncate embeddings, if necessary 245 if self.dimensionality and self.dimensionality < embeddings.shape[1]: File ~/Documents/LLM_test/.venv/lib/python3.12/site-packages/txtai/vectors/external.py:31, in External.encode(self, data) 28 def encode(self, data): 29 # Call external transform function, if available and data not already an array 30 if self.transform and data and not isinstance(data[0], np.ndarray): ---> 31 data = self.transform(data) 33 # Cast to float32 34 return data.astype(np.float32) if isinstance(data, np.ndarray) else np.array(data, dtype=np.float32) Cell In[4], line 2, in transform(text) 1 def transform(text): ----> 2 n_array = ollama.embeddings(model=\"all-minilm\", prompt=text) 3 return np.array(n_array[\"embedding\"]) File ~/Documents/LLM_test/.venv/lib/python3.12/site-packages/ollama/_client.py:201, in Client.embeddings(self, model, prompt, options, keep_alive) 194 def embeddings( 195 self, 196 model: str = '', (...) 199 keep_alive: Optional[Union[float, str]] = None, 200 ) -> Mapping[str, Sequence[float]]: --> 201 return self._request( 202 'POST', 203 '/api/embeddings', 204 json={ 205 'model': model, 206 'prompt': prompt, 207 'options': options or {}, 208 'keep_alive': keep_alive, 209 }, 210 ).json() File ~/Documents/LLM_test/.venv/lib/python3.12/site-packages/ollama/_client.py:74, in Client._request(self, method, url, **kwargs) 72 response.raise_for_status() 73 except httpx.HTTPStatusError as e: ---> 74 raise ResponseError(e.response.text, e.response.status_code) from None 76 return response ResponseError: json: cannot unmarshal array into Go struct field EmbeddingRequest.prompt of type string" }
davidmezzetti commented 4 months ago

Hello,

Thanks for giving txtai a try. I had to modify the code a bit to get it to work. See below. The main thing is that the transform call passes an list of inputs vs a single input.

import ollama
import numpy as np
from txtai import Embeddings

def transform(inputs):
    return np.array([ollama.embeddings(model="all-minilm", prompt=text)["embedding"] for text in inputs])

def test_stream(dataset):
    for i, row in enumerate(dataset):
        yield i,transform(row)

data = [
  "US tops 5 million confirmed virus cases",
  "Canada's last fully intact ice shelf has suddenly collapsed, " +
  "forming a Manhattan-sized iceberg",
  "Beijing mobilises invasion craft along coast as Taiwan tensions escalate",
  "The National Park Service warns against sacrificing slower friends " +
  "in a bear attack",
  "Maine man wins $1M from $25 lottery ticket",
  "Make huge profits without work, earn up to $100,000 a day"
]

embeddings = Embeddings({"transform": transform, "backend": "numpy", "content": True})
embeddings.index(data)
embeddings.search("feel good story", 1)
davidmezzetti commented 4 months ago

Closing due to inactivity. Please re-open or open a new issue if this issue persists.

davidmezzetti commented 4 months ago

I should have mentioned that ollama support is directly built into txtai via LiteLLM.

from txtai import Embeddings

data = [
  "US tops 5 million confirmed virus cases",
  "Canada's last fully intact ice shelf has suddenly collapsed, " +
  "forming a Manhattan-sized iceberg",
  "Beijing mobilises invasion craft along coast as Taiwan tensions escalate",
  "The National Park Service warns against sacrificing slower friends " +
  "in a bear attack",
  "Maine man wins $1M from $25 lottery ticket",
  "Make huge profits without work, earn up to $100,000 a day"
]

embeddings = Embeddings(path="ollama/all-minilm", backend="numpy", content=True)
embeddings.index(data)
embeddings.search("feel good story", 1)