run-llama / llama_docs_bot

Bottoms Up Development with LlamaIndex - Building a Documentation Chatbot
MIT License

3_Eval_Baseline: ValueError: contexts and response must be provided #7

Open CSHproject opened 9 months ago

CSHproject commented 9 months ago

When I run this part of the code:

from llama_index.evaluation import ResponseEvaluator

# gpt-4 evaluator!
evaluator = ResponseEvaluator(service_context=gpt4_service_context)

total_correct, all_results = evaluate_query_engine(evaluator, query_engine, question_dataset)

print(f"Hallucination? Scored {total_correct} out of {len(question_dataset)} questions correctly.")

it returns this error:

ValueError                                Traceback (most recent call last)
Cell In[14], line 6
      3 # gpt-4 evaluator!
      4 evaluator = ResponseEvaluator(service_context=gpt4_service_context)
----> 6 total_correct, all_results = evaluate_query_engine(evaluator, query_engine, question_dataset)
      8 print(f"Hallucination? Scored {total_correct} out of {len(question_dataset)} questions correctly.")

Cell In[13], line 25
     22 print(f"finished batch {(batch_size // 5) + 1} out of {len(questions) // 5}")
     24 for response in responses:
---> 25     eval_result = 1 if "YES" in evaluator.evaluate(response) else 0
     26     total_correct += eval_result
     27     all_results.append(eval_result)

File c:\Users\meniv\AppData\Local\anaconda3\envs\llama_env\Lib\site-packages\llama_index\evaluation\base.py:56, in BaseEvaluator.evaluate(self, query, response, contexts, **kwargs)
     43 def evaluate(
     44     self,
     45     query: Optional[str] = None,
    (...)
     48     **kwargs: Any,
     49 ) -> EvaluationResult:
     50     """Run evaluation with query string, retrieved contexts,
     51     and generated response string.
     52
     53     Subclasses can override this method to provide custom evaluation logic and
     54     take in additional arguments.
     55     """
---> 56     return asyncio.run(
     57         self.aevaluate(
     58             query=query,
     59             response=response,
     60             contexts=contexts,
     61             **kwargs,
     62         )
     63     )

File c:\Users\meniv\AppData\Local\anaconda3\envs\llama_env\Lib\site-packages\nest_asyncio.py:31, in _patch_asyncio.<locals>.run(main, debug)
     29 task = asyncio.ensure_future(main)
     30 try:
---> 31     return loop.run_until_complete(task)
     32 finally:
     33     if not task.done():

File c:\Users\meniv\AppData\Local\anaconda3\envs\llama_env\Lib\site-packages\nest_asyncio.py:99, in _patch_loop.<locals>.run_until_complete(self, future)
     96 if not f.done():
     97     raise RuntimeError(
     98         'Event loop stopped before Future completed.')
---> 99 return f.result()

File c:\Users\meniv\AppData\Local\anaconda3\envs\llama_env\Lib\asyncio\futures.py:203, in Future.result(self)
    201 self.__log_traceback = False
    202 if self._exception is not None:
--> 203     raise self._exception.with_traceback(self._exception_tb)
    204 return self._result

File c:\Users\meniv\AppData\Local\anaconda3\envs\llama_env\Lib\asyncio\tasks.py:304, in Task.__step_run_and_handle_result(failed resolving arguments)
    300 try:
    301     if exc is None:
    302         # We use the `send` method directly, because coroutines
    303         # don't have `__iter__` and `__next__` methods.
--> 304         result = coro.send(None)
    305     else:
    306         result = coro.throw(exc)

File c:\Users\meniv\AppData\Local\anaconda3\envs\llama_env\Lib\site-packages\llama_index\evaluation\faithfulness.py:130, in FaithfulnessEvaluator.aevaluate(failed resolving arguments)
    127     await asyncio.sleep(sleep_time_in_seconds)
    129 if contexts is None or response is None:
--> 130     raise ValueError("contexts and response must be provided")
    132 docs = [Document(text=context) for context in contexts]
    133 index = SummaryIndex.from_documents(docs, service_context=self._service_context)

ValueError: contexts and response must be provided

samueuh commented 8 months ago

Facing the same issue right now - any updates regarding this? Once it starts to evaluate the first batch, I receive the following error message:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
in <cell line: 6>()
      4 evaluator = ResponseEvaluator(service_context=gpt4_service_context)
      5
----> 6 total_correct, all_results = evaluate_query_engine(evaluator, query_engine, question_dataset)
      7
      8 print(f"Hallucination? Scored {total_correct} out of {len(question_dataset)} questions correctly.")

6 frames
in evaluate_query_engine(evaluator, query_engine, questions)
     24 for response in responses:
     25     print(response.response)
---> 26     eval_result = 1 if "YES" in evaluator.evaluate(response) else 0
     27     total_correct += eval_result
     28     all_results.append(eval_result)

/usr/local/lib/python3.10/dist-packages/llama_index/evaluation/base.py in evaluate(self, query, response, contexts, **kwargs)
     54     take in additional arguments.
     55     """
---> 56     return asyncio.run(
     57         self.aevaluate(
     58             query=query,

/usr/local/lib/python3.10/dist-packages/nest_asyncio.py in run(main, debug)
     29 task = asyncio.ensure_future(main)
     30 try:
---> 31     return loop.run_until_complete(task)
     32 finally:
     33     if not task.done():

/usr/local/lib/python3.10/dist-packages/nest_asyncio.py in run_until_complete(self, future)
     97     raise RuntimeError(
     98         'Event loop stopped before Future completed.')
---> 99 return f.result()
    100
    101 def _run_once(self):

/usr/lib/python3.10/asyncio/futures.py in result(self)
    199 self.__log_traceback = False
    200 if self._exception is not None:
--> 201     raise self._exception.with_traceback(self._exception_tb)
    202 return self._result
    203

/usr/lib/python3.10/asyncio/tasks.py in __step(failed resolving arguments)
    230     # We use the `send` method directly, because coroutines
    231     # don't have `__iter__` and `__next__` methods.
--> 232     result = coro.send(None)
    233 else:
    234     result = coro.throw(exc)

/usr/local/lib/python3.10/dist-packages/llama_index/evaluation/faithfulness.py in aevaluate(failed resolving arguments)
    128
    129 if contexts is None or response is None:
--> 130     raise ValueError("contexts and response must be provided")
    131
    132 docs = [Document(text=context) for context in contexts]

ValueError: contexts and response must be provided

designcomputer commented 8 months ago

I've run into the same problem while trying to follow along with the YouTube video.

Omegapy commented 8 months ago

I encountered the same problem while following along with the YouTube video. I added a print statement for the response inside evaluate_query_engine(): the response is not None, so the problem seems to be that the contexts variable is None.


ValueError                                Traceback (most recent call last)
Cell In[62], line 6
      3 # gpt-4 evaluator!
      4 evaluator = ResponseEvaluator(service_context=gpt4_service_context)
----> 6 total_correct, all_results = evaluate_query_engine(evaluator, query_engine, question_dataset)
      8 print(f"Hallucination? Scored {total_correct} out of {len(question_dataset)} questions correctly.")

Cell In[61], line 26, in evaluate_query_engine(evaluator, query_engine, questions)
     24 for response in responses:
     25     print(f"reponse: ", response)
---> 26     if "YES" in evaluator.evaluate_response(response):
     27         eval_result = 1
     28     else:

File ~\anaconda3\Lib\site-packages\llama_index\evaluation\base.py:98, in BaseEvaluator.evaluate_response(self, query, response, **kwargs)
     87 def evaluate_response(
     88     self,
     89     query: Optional[str] = None,
     90     response: Optional[Response] = None,
     91     **kwargs: Any,
     92 ) -> EvaluationResult:
     93     """Run evaluation with query string and generated Response object.
     94
     95     Subclasses can override this method to provide custom evaluation logic and
     96     take in additional arguments.
     97     """
---> 98     return asyncio.run(
     99         self.aevaluate_response(query=query, response=response, **kwargs)
    100     )

File ~\anaconda3\Lib\site-packages\nest_asyncio.py:31, in _patch_asyncio.<locals>.run(main, debug)
     29 task = asyncio.ensure_future(main)
     30 try:
---> 31     return loop.run_until_complete(task)
     32 finally:
     33     if not task.done():

File ~\anaconda3\Lib\site-packages\nest_asyncio.py:99, in _patch_loop.<locals>.run_until_complete(self, future)
     96 if not f.done():
     97     raise RuntimeError(
     98         'Event loop stopped before Future completed.')
---> 99 return f.result()

File ~\anaconda3\Lib\asyncio\futures.py:203, in Future.result(self)
    201 self.__log_traceback = False
    202 if self._exception is not None:
--> 203     raise self._exception.with_traceback(self._exception_tb)
    204 return self._result

File ~\anaconda3\Lib\asyncio\tasks.py:267, in Task.__step(failed resolving arguments)
    263 try:
    264     if exc is None:
    265         # We use the `send` method directly, because coroutines
    266         # don't have `__iter__` and `__next__` methods.
--> 267         result = coro.send(None)
    268     else:
    269         result = coro.throw(exc)

File ~\anaconda3\Lib\site-packages\llama_index\evaluation\base.py:119, in BaseEvaluator.aevaluate_response(self, query, response, **kwargs)
    116     response_str = response.response
    117     contexts = [node.get_content() for node in response.source_nodes]
--> 119 return await self.aevaluate(
    120     query=query, response=response_str, contexts=contexts, **kwargs
    121 )

File ~\anaconda3\Lib\site-packages\llama_index\evaluation\faithfulness.py:130, in FaithfulnessEvaluator.aevaluate(failed resolving arguments)
    127     await asyncio.sleep(sleep_time_in_seconds)
    129 if contexts is None or response is None:
--> 130     raise ValueError("contexts and response must be provided")
    132 docs = [Document(text=context) for context in contexts]
    133 index = SummaryIndex.from_documents(docs, service_context=self._service_context)

ValueError: contexts and response must be provided
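
Looking at the frames above, the cause seems to be how the evaluator is called rather than the evaluator itself (a sketch based only on the signatures visible in the traceback): both evaluate() and evaluate_response() take query as their first positional argument, so passing the Response object positionally binds it to query and leaves response and contexts as None, which is exactly what faithfulness.py line 130 rejects.

# Per the signatures shown above, a positional call binds the Response
# object to `query`, leaving response=None and contexts=None:
#     evaluator.evaluate(response)            # -> ValueError
#     evaluator.evaluate_response(response)   # -> ValueError (same binding)
#
# Passing it as a keyword lets aevaluate_response() unpack it
# (base.py lines 116-117 above pull out response.response and the
# source-node contexts before calling aevaluate()):
result = evaluator.evaluate_response(response=response)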

Omegapy commented 8 months ago

"ValueError: contexts and response must be provided"

For those having the "ValueError: contexts and response must be provided" cited above

Solution Below

First, the ResponseEvaluator class is a legacy class From anaconda3\Lib\site-packages\llama_index\evaluation\faithfulness.py (in anaconda env.)

# legacy: backward compatibility
ResponseEvaluator = FaithfulnessEvaluator
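
A quick way to confirm this in your own environment (a small sketch that only checks that the two names point at the same class in your installed version):

from llama_index.evaluation import FaithfulnessEvaluator, ResponseEvaluator

# True when ResponseEvaluator is just the backward-compatibility alias above
print(ResponseEvaluator is FaithfulnessEvaluator)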

Replace:

for response in responses:
    eval_result = 1 if "YES" in evaluator.evaluate(response) else 0
    total_correct += eval_result
    all_results.append(eval_result)

with

for response in responses:
    if evaluator.evaluate_response(response=response).passing:
        eval_result = 1
    else:
        eval_result = 0
    total_correct += eval_result
    all_results.append(eval_result)
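
The reason for this change: evaluate_response() returns an EvaluationResult object rather than a plain "YES"/"NO" string, so the old substring check no longer applies. Roughly (field names are an assumption based on the EvaluationResult used here, so verify against your installed version):

result = evaluator.evaluate_response(response=response)
print(result.passing)   # bool: did the response pass the faithfulness check?
print(result.feedback)  # the raw judge output from the evaluation LLM
print(result.score)     # numeric score (1.0 / 0.0 for pass / fail)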

Replace:

from llama_index.evaluation import ResponseEvaluator

# gpt-4 evaluator!
evaluator = ResponseEvaluator(service_context=gpt4_service_context)

with

from llama_index.evaluation import FaithfulnessEvaluator

# gpt-4 evaluator!
evaluator = FaithfulnessEvaluator(service_context=gpt4_service_context)

My solution:

import time
import asyncio
import nest_asyncio

# allow nested event loops (asyncio.run inside Jupyter / inside the evaluator)
nest_asyncio.apply()

from llama_index import Response

def evaluate_query_engine(evaluator, query_engine, questions):
    async def run_query(query_engine, q):
        try:
            return await query_engine.aquery(q)
        except Exception:
            return Response(response="Error, query failed.")

    total_correct = 0
    all_results = []
    for batch_size in range(0, len(questions), 5):
        batch_qs = questions[batch_size:batch_size+5]

        # run the batch of queries concurrently
        tasks = [run_query(query_engine, q) for q in batch_qs]
        responses = asyncio.run(asyncio.gather(*tasks))
        print(f"finished batch {(batch_size // 5) + 1} out of {len(questions) // 5}")

        for response in responses:
            # evaluate_response() extracts the response text and source-node
            # contexts from the Response object before judging faithfulness
            if evaluator.evaluate_response(response=response).passing:
                eval_result = 1
            else:
                eval_result = 0
            total_correct += eval_result
            all_results.append(eval_result)

        # helps avoid rate limits
        time.sleep(1)

    return total_correct, all_results

from llama_index.evaluation import FaithfulnessEvaluator

# gpt-4 evaluator!
evaluator = FaithfulnessEvaluator(service_context=gpt4_service_context)

total_correct, all_results = evaluate_query_engine(evaluator, query_engine, question_dataset)

print(f"Hallucination? Scored {total_correct} out of {len(question_dataset)} questions correctly.")
diegoami commented 7 months ago

For the "Evaluating Response for Answer Quality" section:

import time
import asyncio
import nest_asyncio
nest_asyncio.apply()
from llama_index import Response

def evaluate_query_engine(evaluator, query_engine, questions):
    async def run_query(query_engine, q):
        try:
            return await query_engine.aquery(q)
        except Exception:
            return Response(response="Error, query failed.")

    total_correct = 0
    all_results = []
    for batch_size in range(0, len(questions), 5):
        batch_qs = questions[batch_size:batch_size+5]

        tasks = [run_query(query_engine, q) for q in batch_qs]
        responses = asyncio.run(asyncio.gather(*tasks))
        print(f"finished batch {(batch_size // 5) + 1} out of {len(questions) // 5}")

        for query, response in zip(batch_qs, responses):

            if evaluator.evaluate_response(query=query, response=response).passing: 
                eval_result = 1
            else:
                eval_result = 0
            total_correct += eval_result
            all_results.append(eval_result)

        # helps avoid rate limits
        time.sleep(1)

    return total_correct, all_results

from llama_index.evaluation import QueryResponseEvaluator

evaluator = QueryResponseEvaluator(service_context=gpt4_service_context)
total_correct, all_results = evaluate_query_engine(evaluator, query_engine, question_dataset)

print(f"Response satisfies the query? Scored {total_correct} out of {len(question_dataset)} questions correctly.")