1atAlcone opened 5 days ago
```python
# Search the vector store
search_query = "example search query"
search_response = client.post(
    path=f"{base_url}/vector_stores/{vs.id}/search",
    body={"query": search_query},
    cast_to=dict,
)
print(f"Search results for '{search_query}':")
print(f"- {search_response}")
```
```python
def generate_llm_response(messages, processed_results) -> dict:
    SYSTEM_PROMPT = """You're an AI assistant that writes technical documentation. You can search a vector store for information relevant to the user's query. Use the provided vector store results to inform your response, but don't mention the vector store directly."""
```
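The rest of the function body is cut off above. Purely as a sketch, assuming the hub exposes an OpenAI-compatible `chat/completions` endpoint reachable with the same `client.post` pattern as the search call (the endpoint path, model name, and message layout here are assumptions, not from the original), the missing part might look like this:

```python
def generate_llm_response(messages, processed_results) -> dict:
    SYSTEM_PROMPT = """..."""  # same prompt as above, elided here

    # Fold the processed vector store results into the system message so the
    # model can draw on them without mentioning the store directly.
    context = "\n".join(processed_results)
    system_message = {
        "role": "system",
        "content": f"{SYSTEM_PROMPT}\n\nRelevant context:\n{context}",
    }
    # Hypothetical chat completions call; path and model name are assumptions.
    return client.post(
        path=f"{base_url}/chat/completions",
        body={
            "model": "example-model",
            "messages": [system_message] + messages,
        },
        cast_to=dict,
    )
```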
```python
# Get an LLM response using the vector store
search_query = "example search query"
client_config = ClientConfig(base_url=CONFIG.nearai_hub.base_url, auth=CONFIG.auth)
inference = InferenceRouter(client_config)
vector_results = inference.query_vector_store(vs.id, search_query)
processed_results = process_vector_results([vector_results])
messages = [{"role": "user", "content": search_query}]  # assumed; the original doesn't show how `messages` is built
llm_response = generate_llm_response(messages, processed_results)
print(llm_response["choices"][0]["message"]["content"])
```
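`process_vector_results` is never defined in the post. A minimal sketch of what it could look like, assuming each search result is a dict carrying its text under a `chunk_text` key (the key name is a guess; the actual shape of `query_vector_store` results may differ):

```python
def process_vector_results(results_batches) -> list:
    """Flatten batches of vector store results into plain-text snippets.

    Assumes each result is a dict with its text under "chunk_text";
    adjust the key to match what query_vector_store actually returns.
    """
    snippets = []
    for batch in results_batches:
        for result in batch or []:
            if isinstance(result, dict):
                text = result.get("chunk_text", "")
            else:
                text = str(result)
            if text:
                snippets.append(text)
    return snippets
```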