An error occurred — I've provided the log below. Could you please help with it?
param_dataset_for_test = {'answer': ['### Engagement with The Gym Group Project\n\nOur engagement with The Gym Group involved working on a brow... to the The Gym Group.pptx'], 'question': ['Can you provide details about our engagement with The Gym Group project?']}
azure_model = AzureChatOpenAI(client=<openai.resources.chat.completions.Completions object at 0x72a07c3ed660>, async_client=<openai....e.com/', deployment_name='llmgpt4o', openai_api_version='2023-05-15', openai_api_type='azure', validate_base_url=False)
azure_embeddings = AzureOpenAIEmbeddings(client=<openai.resources.embeddings.Embeddings object at 0x72a07c5dde40>, async_client=<openai.r...ai-aichatbot-uksouth-001.openai.azure.com/', azure_ad_token=None, azure_ad_token_provider=None, validate_base_url=True)
ragas_metrics = [Faithfulness(llm=None, name='faithfulness', evaluation_mode=<EvaluationMode.qac: 1>, nli_statements_message=Prompt(na...', 'answer'], output_key='classification', output_type='json', language='english'), max_retries=1, _reproducibility=1)]
ragas_config = RunConfig(timeout=60, max_retries=10, max_wait=60, max_workers=16, thread_timeout=600.0, exception_types=<class 'openai.RateLimitError'>, log_tenacity=False)
evaluate_ragas = <function evaluate at 0x72a0852a9870>
soft_assert_metrics = <metrics.assertion.RagasAssertion object at 0x72a07c4e3610>
tonic_config = TonicValidateConfigs(api_key='[REDACTED]', project_id='84deb83f-6614-4a58-be60-80da706cef37')
tonic_validate_api = <tonic_ragas_logger.ragas_validate_api.RagasValidateApi object at 0x72a07c4e07c0>
def test_check_metrics_for_specified_question(
param_dataset_for_test,
azure_model,
azure_embeddings,
ragas_metrics,
ragas_config,
evaluate_ragas,
soft_assert_metrics,
tonic_config,
tonic_validate_api,
):
with allure.step("Transform dict to Dataset"):
dataset = Dataset.from_dict(param_dataset_for_test)
result = evaluate_ragas(
dataset=dataset,
metrics=ragas_metrics,
llm=azure_model,
embeddings=azure_embeddings,
run_config=ragas_config,
in_ci=True
)
> tonic_validate_api.upload_results(tonic_config.project_id, result)
tests/godel_ai_chat_bot_test.py:29:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
.venv/lib/python3.10/site-packages/tonic_ragas_logger/ragas_validate_api.py:56: in upload_results
run = self.__convert_to_run(results)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <tonic_ragas_logger.ragas_validate_api.RagasValidateApi object at 0x72a07c4e07c0>
results = {'faithfulness': 1.0000, 'answer_relevancy': 0.9288, 'context_precision': 1.0000, 'context_recall': 0.9333}
def __convert_to_run(self, results: Result) -> Run:
"""Converts a Result to a Run.
Parameters
----------
results : Result
The result to convert.
"""
try:
overall_scores = {
str(score): 0 if value is None else float(value)
for score, value in results.items()
}
except ValueError:
raise ValueError(
"The scores in the results are in the format of {score: value} where score is a string and value is a float."
)
if not results.dataset:
raise ValueError(
"The ragas results do not have a dataset provided. Can not upload results without a dataset."
)
if len(results.scores) != len(results.dataset):
raise ValueError(
"The length of results.scores and results.dataset are not the same"
)
run_data = []
for i in range(len(results.scores)):
try:
scores: Dict[str, float | None] = {
str(score): 0 if value is None else float(value)
for score, value in results.scores[i].items()
}
except ValueError:
raise ValueError(
"The scores in the results are in the format of {score: value} where score is a string and value is a float."
)
run_data.append(
RunData(
scores=scores,
reference_question=results.dataset["question"][i],
reference_answer=results.dataset["ground_truth"][i],
llm_answer=results.dataset["answer"][i],
llm_context=results.dataset["contexts"][i],
)
)
> return Run(
overall_scores=overall_scores,
run_data=run_data,
id=None,
)
E pydantic_core._pydantic_core.ValidationError: 1 validation error for Run
E llm_evaluator
E Field required [type=missing, input_value=ArgsKwargs((), {'overall_...you!\n'])], 'id': None}), input_type=ArgsKwargs]
E For further information visit https://errors.pydantic.dev/2.8/v/missing
.venv/lib/python3.10/site-packages/tonic_ragas_logger/ragas_validate_api.py:124: ValidationError
```
Hi guys! I'm trying to attach your logger to our test framework, which uses RAGAS to evaluate our RAG pipeline. I'm following the instructions at https://docs.tonic.ai/validate/runs/tonic-validate-run-view-results
The error occurred in the log above — it looks like the `Run` model now requires an `llm_evaluator` field that `__convert_to_run` doesn't set. Could you please help with it?