Giskard-AI / giskard

🐢 Open-Source Evaluation & Testing for ML & LLM systems
https://docs.giskard.ai
Apache License 2.0

Large knowledge base leads to context length exceeded error #2050

Open Tobi696 opened 1 week ago

Tobi696 commented 1 week ago

Issue Type

Bug

Source

source

Giskard Library Version

2.15.2

OS Platform and Distribution

macOS

Python version

3.11

Installed python packages

aiohttp==3.9.4
aiosignal==1.3.1
alembic==1.13.3
annotated-types==0.7.0
anthropic==0.25.9
antlr4-python3-runtime==4.9.3
anyio==4.6.0
APScheduler==3.10.4
argslib==0.1.0
asgiref==3.8.1
astroid==2.15.8
asttokens==2.4.1
attrs==24.2.0
aws_secretsmanager_caching==1.1.3
backoff==2.2.1
bcrypt==4.2.0
beautifulsoup4==4.12.3
behave==1.2.6
bert-score==0.3.13
blinker==1.8.2
bokeh==3.4.3
boto3==1.34.162
botocore==1.34.162
build==1.2.2
CacheControl==0.14.0
cachetools==5.5.0
cat_app_client==1.6.1rc2
cat_client==0.1.0
catbackend==1.6.1rc2
certifi==2024.8.30
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.3.2
chroma-hnswlib==0.7.3
chromadb==0.4.24
cleo==2.1.0
click==8.1.7
cloudpickle==3.1.0
colorama==0.4.6
coloredlogs==15.0.1
contourpy==1.3.0
controllerlib==0.1.0
coverage==7.6.1
crashtest==0.4.1
cryptography==43.0.1
cycler==0.12.1
databricks-sdk==0.35.0
dataclasses-json==0.6.7
datasets==3.0.1
decorator==5.1.1
defusedxml==0.7.1
Deprecated==1.2.14
dill==0.3.9
distlib==0.3.8
distro==1.9.0
docopt==0.6.2
dodgy==0.2.1
dulwich==0.21.7
durationpy==0.8
effdet==0.4.1
emoji==2.14.0
eval_type_backport==0.2.0
evaluate==0.4.3
Events==0.5
executing==2.1.0
faiss-cpu==1.8.0
fastapi==0.115.0
fastjsonschema==2.20.0
filelock==3.16.1
filetype==1.2.0
flake8==5.0.4
flake8-polyfill==1.0.2
Flask==2.3.3
Flask-APScheduler==1.13.1
Flask-Cors==5.0.0
flatbuffers==24.3.25
fonttools==4.54.1
frozenlist==1.4.1
fsspec==2024.9.0
giskard==2.15.2
gitdb==4.0.11
GitPython==3.1.43
google-api-core==2.20.0
google-auth==2.35.0
google-cloud-core==2.4.1
google-cloud-firestore==2.19.0
google-cloud-vision==3.7.4
googleapis-common-protos==1.59.1
griffe==0.48.0
grpcio==1.66.2
grpcio-status==1.62.3
h11==0.14.0
httpcore==1.0.5
httptools==0.6.1
httpx==0.27.2
huggingface-hub==0.25.1
humanfriendly==10.0
idna==3.10
importlib_metadata==8.4.0
importlib_resources==6.4.5
iniconfig==2.0.0
installer==0.7.0
iopath==0.1.10
ipython==8.28.0
isort==5.13.2
itsdangerous==2.2.0
jaraco.classes==3.4.0
jedi==0.19.1
Jinja2==3.1.4
jira==3.8.0
jiter==0.5.0
jmespath==1.0.1
joblib==1.4.2
jsonpatch==1.33
jsonpath-python==1.0.6
jsonpointer==3.0.0
keyring==24.3.1
kiwisolver==1.4.7
kubernetes==31.0.0
langchain==0.2.16
langchain-aws==0.1.18
langchain-chroma==0.1.4
langchain-community==0.2.17
langchain-core==0.2.41
langchain-openai==0.1.25
langchain-postgres==0.0.9
langchain-text-splitters==0.2.4
langdetect==1.0.9
langgraph==0.1.19
langsmith==0.1.129
layoutparser==0.3.4
lazy-object-proxy==1.10.0
libmagic==1.0
llvmlite==0.43.0
lxml==5.3.0
Mako==1.3.5
Markdown==3.7
markdown-it-py==3.0.0
MarkupSafe==2.1.5
marshmallow==3.22.0
matplotlib==3.9.2
matplotlib-inline==0.1.7
mccabe==0.7.0
mdurl==0.1.2
mixpanel==4.10.1
mlflow-skinny==2.17.0
mmh3==5.0.1
monotonic==1.6
more-itertools==10.5.0
mpmath==1.3.0
msgpack==1.1.0
multidict==6.1.0
multiprocess==0.70.16
mypy==1.11.2
mypy-extensions==1.0.0
nest-asyncio==1.6.0
networkx==3.4.2
nltk==3.9.1
num2words==0.5.13
numba==0.60.0
numpy==1.26.4
oauthlib==3.2.2
olefile==0.47
omegaconf==2.3.0
onnx==1.17.0
onnxruntime==1.19.2
openai==1.50.2
opencv-python==4.10.0.84
opensearch-py==2.7.1
opentelemetry-api==1.27.0
opentelemetry-exporter-jaeger-proto-grpc==1.21.0
opentelemetry-exporter-otlp-proto-common==1.27.0
opentelemetry-exporter-otlp-proto-grpc==1.27.0
opentelemetry-instrumentation==0.48b0
opentelemetry-instrumentation-asgi==0.48b0
opentelemetry-instrumentation-fastapi==0.48b0
opentelemetry-propagator-aws-xray==1.0.2
opentelemetry-proto==1.27.0
opentelemetry-sdk==1.27.0
opentelemetry-sdk-extension-aws==2.0.2
opentelemetry-semantic-conventions==0.48b0
opentelemetry-util-http==0.48b0
orjson==3.10.7
overrides==7.7.0
packaging==24.1
pandas==2.2.3
parse==1.20.2
parse_type==0.6.3
parso==0.8.4
pdf2image==1.17.0
pdfminer.six==20231228
pdfplumber==0.11.4
pep8-naming==0.10.0
pexpect==4.9.0
pgvector==0.2.5
pi_heif==0.20.0
pikepdf==9.3.0
pillow==10.4.0
pinecone-client==3.2.2
pip==24.2
pkginfo==1.11.1
platformdirs==4.3.6
pluggy==1.5.0
poetry==1.8.3
poetry-core==1.9.0
poetry-git-version-plugin==1.1.0
poetry-plugin-export==1.8.0
portalocker==2.10.1
posthog==3.6.6
prompt_toolkit==3.0.48
propcache==0.2.0
prospector==1.10.3
proto-plus==1.24.0
protobuf==4.25.5
psutil==6.1.0
psycopg==3.2.3
psycopg-binary==3.2.3
psycopg-pool==3.2.3
ptyprocess==0.7.0
pulsar-client==3.5.0
pure_eval==0.2.3
py==1.11.0
pyarrow==17.0.0
pyasn1==0.6.1
pyasn1_modules==0.4.1
pycocotools==2.0.8
pycodestyle==2.9.1
pycparser==2.22
pydantic==2.9.2
pydantic_core==2.23.4
pydocstyle==6.3.0
pyflakes==2.5.0
Pygments==2.18.0
pyinstrument==4.7.3
PyJWT==2.9.0
pylint==2.17.7
pylint-celery==0.3
pylint-django==2.5.3
pylint-flask==0.6
pylint-plugin-utils==0.7
pynndescent==0.5.13
pyparsing==3.2.0
pypdf==5.0.1
pypdfium2==4.30.0
PyPika==0.48.9
pyproject_hooks==1.2.0
pytest==6.2.5
pytest-cov==4.1.0
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-iso639==2024.4.27
python-magic==0.4.27
python-multipart==0.0.12
python-oxmsg==0.0.1
pytz==2024.2
PyYAML==6.0.2
RapidFuzz==3.10.0
regex==2024.9.11
requests==2.32.3
requests-oauthlib==2.0.0
requests-toolbelt==1.0.0
requirements-detector==1.2.2
rich==13.8.1
rsa==4.9
ruff==0.1.15
s3transfer==0.10.2
safetensors==0.4.5
scikit-learn==1.5.2
scipy==1.11.4
semver==3.0.2
setoptconf-tmp==0.3.1
setuptools==75.1.0
shellingham==1.5.4
six==1.16.0
smmap==5.0.1
sniffio==1.3.1
snowballstemmer==2.2.0
soupsieve==2.6
SQLAlchemy==2.0.35
sqlparse==0.5.1
stack-data==0.6.3
starlette==0.38.6
sympy==1.13.3
tabulate==0.9.0
tenacity==8.5.0
threadpoolctl==3.5.0
tiktoken==0.7.0
timm==1.0.11
tokenizers==0.20.0
toml==0.10.2
tomlkit==0.13.2
torch==2.5.0
torchvision==0.20.0
tornado==6.4.1
tqdm==4.66.5
traitlets==5.14.3
transformers==4.45.2
trove-classifiers==2024.9.12
typer==0.12.5
types-requests==2.32.0.20240914
typing_extensions==4.12.2
typing-inspect==0.9.0
tzdata==2024.2
tzlocal==5.2
umap-learn==0.5.6
unstructured==0.16.0
unstructured-client==0.26.1
unstructured-inference==0.7.36
unstructured.pytesseract==0.3.13
urllib3==2.2.3
uvicorn==0.31.0
uvloop==0.20.0
virtualenv==20.26.6
waitress==3.0.0
watchfiles==0.24.0
wcwidth==0.2.13
websocket-client==1.8.0
websockets==13.1
Werkzeug==3.0.3
wrapt==1.16.0
xattr==1.1.0
xxhash==3.5.0
xyzservices==2024.9.0
yarl==1.13.1
zipp==3.20.2
zstandard==0.23.0

Current Behaviour?

Adding the PDF files to the knowledge base leads to a "maximum context length exceeded" error.

Standalone code OR list down the steps to reproduce the issue

from langchain_community.document_loaders import DirectoryLoader
from giskard.llm.client.openai import OpenAIClient
from giskard.llm.embeddings.openai import OpenAIEmbedding
from giskard.rag import generate_testset
from giskard.rag import KnowledgeBase
import pandas as pd
import giskard

openai_client = OpenAIClient(model="gpt-4o-mini")
giskard.llm.set_llm_api("openai")
giskard.llm.set_default_client(openai_client)

# documents = load your documents
loader_txt = DirectoryLoader("./website", glob="**/*.txt", show_progress=True)
loader_pdf = DirectoryLoader("./pdf", glob="**/*.pdf", show_progress=True)
documents_txt = loader_txt.load()
documents_pdf = loader_pdf.load()
documents = documents_txt + documents_pdf

df = pd.DataFrame([d.page_content for d in documents], columns=["text"])

knowledge_base = KnowledgeBase(df)

testset = generate_testset(
    knowledge_base,
    num_questions=50,
    agent_description="Ein Chatbot, der Fragen zur Website beantwortet",
    language='de',
)

testset.save("test_questions.jsonl")

Relevant log output

Traceback (most recent call last):
  File "/Users/tobias.wimmer/Documents/ByteSource/cat-backend-v2/libs/test/generate_test_questions.py", line 24, in <module>
    testset = generate_testset(
              ^^^^^^^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/giskard/rag/testset_generation.py", line 63, in generate_testset
    _ = knowledge_base.topics
        ^^^^^^^^^^^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/giskard/rag/knowledge_base.py", line 240, in topics
    self._topics_inst = self._find_topics()
                        ^^^^^^^^^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/giskard/rag/knowledge_base.py", line 251, in _find_topics
    clustering = hdbscan.fit(self._reduced_embeddings)
                             ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/giskard/rag/knowledge_base.py", line 208, in _reduced_embeddings
    self._reduced_embeddings_inst = reducer.fit_transform(self._embeddings)
                                                          ^^^^^^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/giskard/rag/knowledge_base.py", line 192, in _embeddings
    self._embeddings_inst = np.array(self._embedding_model.embed([doc.content for doc in self._documents]))
                                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/giskard/llm/embeddings/openai.py", line 53, in embed
    response = self.client.embeddings.create(input=batch, model=self.model)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/openai/resources/embeddings.py", line 124, in create
    return self._post(
           ^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/openai/_base_client.py", line 1270, in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/openai/_base_client.py", line 947, in request
    return self._request(
           ^^^^^^^^^^^^^^
  File "/Users/tobias.wimmer/Library/Caches/pypoetry/virtualenvs/catbackend-RFqGU8Am-py3.11/lib/python3.11/site-packages/openai/_base_client.py", line 1051, in _request
    raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': {'message': "This model's maximum context length is 8192 tokens, however you requested 43874 tokens (43874 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.", 'type': 'invalid_request_error', 'param': None, 'code': None}}

henchaves commented 1 week ago

Hello @Tobi696, Thanks for reporting this issue. Could you give more details about the dataframe size that you're trying to use as KB? For example, the number of rows, the max number of characters in a single row and the average number of characters per row.

Tobi696 commented 1 week ago

Thanks for looking into it! I'm not that experienced with Python and pandas; I hope this code does what we need:

df = pd.DataFrame([d.page_content for d in documents], columns=["text"])

print(f'Number of rows: {df.shape[0]}')
print(f'Max number of characters in a single row: {df.text.str.len().max()}')
print(f'Average number of characters in a single row: {df.text.str.len().mean()}')

Number of rows: 168
Max number of characters in a single row: 286419
Average number of characters in a single row: 21418.47619047619

So these high numbers are the problem?

henchaves commented 1 week ago

@Tobi696 Yeah, it seems to be, because the last line of the error log was:

openai.BadRequestError: Error code: 400 - {'error': {'message': "This model's maximum context length is 8192 tokens, however you requested 43874 tokens (43874 in your prompt; 0 for the completion). Please reduce your prompt; or completion length.", 'type': 'invalid_request_error', 'param': None, 'code': None}}

And since the default OpenAIEmbedding has a batch_size of 40, it will send 40 rows of your dataframe in a single embeddings request. You can try to reduce the number of characters in each row or reduce the batch_size, for example by executing the following code:

import giskard
from openai import OpenAI
from giskard.llm.embeddings.openai import OpenAIEmbedding

client = OpenAI(...)

# create a custom embedding model to reduce the batch_size
embedding_model = OpenAIEmbedding(client=client, model="text-embedding-ada-002", batch_size=4)

# set the created embedding model as the default one
giskard.llm.embeddings.set_default_embedding(embedding_model)
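
For the other suggestion (reducing the number of characters in each row), a minimal sketch under the assumption that the documents are loaded with langchain's DirectoryLoader as in your script could look like this; the chunk_size and chunk_overlap values are illustrative, not values required by Giskard:

import pandas as pd
from langchain_text_splitters import RecursiveCharacterTextSplitter

# split long documents into smaller chunks so that no single row
# comes close to the embedding model's context limit
splitter = RecursiveCharacterTextSplitter(chunk_size=4000, chunk_overlap=200)
split_documents = splitter.split_documents(documents)  # `documents` as loaded above

df = pd.DataFrame([d.page_content for d in split_documents], columns=["text"])

Each chunk then becomes its own row in the knowledge base, which should also give generate_testset smaller, more focused contexts to draw questions from.
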
Tobi696 commented 1 week ago

I get the same error with a reduced batch size unfortunately...

from langchain_community.document_loaders import DirectoryLoader
from giskard.llm.client.openai import OpenAIClient
from giskard.llm.embeddings.openai import OpenAIEmbedding
from giskard.rag import generate_testset
from giskard.rag import KnowledgeBase
import pandas as pd
import giskard
from openai import OpenAI

openai_client = OpenAIClient(model="gpt-4o-mini")
giskard.llm.set_llm_api("openai")
giskard.llm.set_default_client(openai_client)

client = OpenAI()
embedding_model = OpenAIEmbedding(client=client, model="text-embedding-ada-002", batch_size=4)
giskard.llm.embeddings.set_default_embedding(embedding_model)

# documents = load your documents
loader_txt = DirectoryLoader("./website", glob="**/*.txt", show_progress=True)
loader_pdf = DirectoryLoader("./pdf", glob="**/*.pdf", show_progress=True)
documents_txt = loader_txt.load()
documents_pdf = loader_pdf.load()
documents = documents_txt + documents_pdf

df = pd.DataFrame([d.page_content for d in documents], columns=["text"])

print(f'Number of rows: {df.shape[0]}')
print(f'Max number of characters in a single row: {df.text.str.len().max()}')
print(f'Average number of characters in a single row: {df.text.str.len().mean()}')

knowledge_base = KnowledgeBase(df, embedding_model=embedding_model)

testset = generate_testset(
    knowledge_base,
    num_questions=5,
    agent_description="Ein Chatbot, der Fragen zur Website beantwortet",
    language='de',
)

testset.save("test_questions.jsonl")

henchaves commented 1 week ago

@Tobi696 I managed to reproduce the same error on my side. I'll investigate and get back to you as soon as I have a solution.

henchaves commented 1 week ago

Hello @Tobi696, indeed it seems that the number of tokens in a single row exceeds the model's limit. You can check the number of tokens for each row by executing the following code:

import tiktoken

MODEL_NAME = "text-embedding-ada-002"

def num_tokens_from_string(string: str, model_name: str) -> int:
    """Returns the number of tokens in a text string."""
    encoding = tiktoken.encoding_for_model(model_name)
    num_tokens = len(encoding.encode(string))
    return num_tokens

df["num_tokens"] = df["text"].apply(lambda x: num_tokens_from_string(x, MODEL_NAME))

print("Max num_tokens:", df["num_tokens"].max())

If this number is higher than 8192, the embedding request won't work, because it exceeds the model's maximum context length.
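
As a possible workaround, and only a sketch under the assumption that truncating oversized rows is acceptable for test-set generation (the 8,000-token cap and the helper name are illustrative), you could cut each row down to the model's limit before building the KnowledgeBase:

import tiktoken
from giskard.rag import KnowledgeBase

MODEL_NAME = "text-embedding-ada-002"
MAX_TOKENS = 8000  # stay safely below the 8192-token limit

encoding = tiktoken.encoding_for_model(MODEL_NAME)

def truncate_to_token_limit(text: str, max_tokens: int = MAX_TOKENS) -> str:
    """Truncate a text to at most max_tokens tokens."""
    tokens = encoding.encode(text)
    if len(tokens) <= max_tokens:
        return text
    return encoding.decode(tokens[:max_tokens])

# truncate any rows that exceed the limit, then rebuild the knowledge base
# (`df` and `embedding_model` are the objects from the snippets above)
df["text"] = df["text"].apply(truncate_to_token_limit)
knowledge_base = KnowledgeBase(df, embedding_model=embedding_model)

Alternatively, splitting long rows into several smaller ones (as in the chunking sketch earlier in this thread) keeps all of the content available to the knowledge base instead of discarding the tail of each document.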