langchain-ai / langchain-google


async methods aren't working with ChatGoogleGenerativeAI #357

Closed azliabdullah closed 1 month ago

azliabdullah commented 1 month ago

Methods like astream, ainvoke, and astream_events throw exceptions with Gemini 1.5, regardless of whether they are called via a graph, a chain, or the model directly.

I have compared with ChatOpenAI, where all of these methods work, and with the sync methods, which also work.

I'm using Python 3.12.1 with langchain-google-genai 1.0.7.

Here's the reproducible code:

from langchain_core.tools import tool
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from langchain_core.output_parsers import StrOutputParser

import asyncio

@tool
def check_weather(location: str) -> str:
    '''Return the weather forecast for the specified location.'''
    return f"It's always sunny in {location}"

tools = [check_weather]

# model = ChatOpenAI(
#     model="gpt-4o",
#     streaming=True
#     )
model = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash",
    )

parser = StrOutputParser()
chain = model | parser
graph = create_react_agent(model, tools=tools)
input = "what is the weather in sf"
input_dict = {"messages": [("user", "what is the weather in sf")]}

def graph_stream():
    print("graph_stream")
    for s in graph.stream(input_dict, stream_mode="values"):
        print(s)
    print("\n")

def chain_stream():
    print("chain_stream")
    for s in chain.stream(input):
        print(s)
    print("\n")

def model_stream():
    print("model_stream")
    for s in model.stream(input):
        print(s)
    print("\n")

# async methods
async def graph_astream():
    print("graph_astream")
    async for s in graph.astream(input_dict, stream_mode="values"):
        print(s)
    print("\n")

async def chain_astream():
    print("chain_astream")
    async for s in chain.astream(input):
        print(s)
    print("\n")

async def model_astream():
    print("model_astream")
    async for s in model.astream(input):
        print(s)
    print("\n")

async def model_ainvoke():
    print("model_ainvoke")
    a = await model.ainvoke(input)
    print(a)

async def graph_astream_event():
    print("graph_astream_event")
    async for s in graph.astream_events(input_dict, version="v2"):
        print(s)
    print("\n")

# all 3 of these work with both gemini and gpt-4o
graph_stream()
chain_stream()
model_stream()

# none of these work with gemini, but all work with gpt-4o
# asyncio.run(graph_astream())
# asyncio.run(chain_astream())
# asyncio.run(model_astream())
# asyncio.run(model_ainvoke())
asyncio.run(graph_astream_event())

Here's the stack trace for graph_astream (model_ainvoke produces a similar trace):

{'messages': [HumanMessage(content='what is the weather in sf', id='f2dfae8e-b18b-46f0-a6d4-f51c8b8c6066')]}
Traceback (most recent call last):
  File "/home/bai/app/gemini_test.py", line 87, in <module>
    asyncio.run(graph_astream())
  File "/usr/local/lib/python3.12/asyncio/runners.py", line 194, in run
    return runner.run(main)
           ^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/asyncio/runners.py", line 118, in run
    return self._loop.run_until_complete(task)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/asyncio/base_events.py", line 687, in run_until_complete
    return future.result()
           ^^^^^^^^^^^^^^^
  File "/home/bai/app/gemini_test.py", line 54, in graph_astream
    async for s in graph.astream(input_dict, stream_mode="values"):
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langgraph/pregel/__init__.py", line 1431, in astream
    _panic_or_proceed(done, inflight, step)
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langgraph/pregel/__init__.py", line 1643, in _panic_or_proceed
    raise exc
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langgraph/pregel/retry.py", line 120, in arun_with_retry
    await task.proc.ainvoke(task.input, task.config)
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 2541, in ainvoke
    input = await step.ainvoke(input, config, **kwargs)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 4003, in ainvoke
    return await self._acall_with_config(
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 1649, in _acall_with_config
    output: Output = await asyncio.create_task(coro, context=context)  # type: ignore
                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 3950, in _ainvoke
    output = await acall_func_with_variable_args(
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langgraph/prebuilt/chat_agent_executor.py", line 417, in acall_model
    response = await model_runnable.ainvoke(messages, config)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 4598, in ainvoke
    return await self.bound.ainvoke(
           ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py", line 269, in ainvoke
    llm_result = await self.agenerate_prompt(
                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py", line 691, in agenerate_prompt
    return await self.agenerate(
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py", line 651, in agenerate
    raise exceptions[0]
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py", line 836, in _agenerate_with_cache
    result = await self._agenerate(
             ^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_google_genai/chat_models.py", line 789, in _agenerate
    raise RuntimeError(
RuntimeError: Initialize ChatGoogleGenerativeAI with a running event loop to use async methods.

And here's the stack trace for chain_astream (model_astream and graph_astream_event produce similar traces):

Traceback (most recent call last):
  File "/home/bai/app/gemini_test.py", line 87, in <module>
    asyncio.run(chain_astream())
  File "/usr/local/lib/python3.12/asyncio/runners.py", line 194, in run
    return runner.run(main)
           ^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/asyncio/runners.py", line 118, in run
    return self._loop.run_until_complete(task)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/asyncio/base_events.py", line 687, in run_until_complete
    return future.result()
           ^^^^^^^^^^^^^^^
  File "/home/bai/app/gemini_test.py", line 59, in chain_astream
    async for s in chain.astream(input):
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 2908, in astream
    async for chunk in self.atransform(input_aiter(), config, **kwargs):
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 2891, in atransform
    async for chunk in self._atransform_stream_with_config(
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 1974, in _atransform_stream_with_config
    chunk: Output = await asyncio.create_task(  # type: ignore[call-arg]
                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 2861, in _atransform
    async for output in final_pipeline:
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/output_parsers/transform.py", line 60, in atransform
    async for chunk in self._atransform_stream_with_config(
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 1932, in _atransform_stream_with_config
    final_input: Optional[Input] = await py_anext(input_for_tracing, None)
                                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/utils/aiter.py", line 65, in anext_impl
    return await __anext__(iterator)
           ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/utils/aiter.py", line 100, in tee_peer
    item = await iterator.__anext__()
           ^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/runnables/base.py", line 1215, in atransform
    async for output in self.astream(final, config, **kwargs):
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py", line 417, in astream
    raise e
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py", line 395, in astream
    async for chunk in self._astream(
  File "/home/bai/.cache/pypoetry/virtualenvs/gemini_64LGGN_v-py3.12/lib/python3.12/site-packages/langchain_google_genai/chat_models.py", line 871, in _astream
    generation_method=self.async_client.stream_generate_content,
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'stream_generate_content'
nehas0326 commented 1 month ago

I am facing this issue as well. The check was added only a month ago (https://app.semanticdiff.com/gh/langchain-ai/langchain-google/pull/244/overview): the PR says to "require that async client be initialized inside a running event loop", which is what produces RuntimeError: Initialize ChatGoogleGenerativeAI with a running event loop to use async methods. @schoennenbeck's solution here looks like a similar approach. @eyurtsev, could you suggest a solution?
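
For what it's worth, the error message itself states the constraint: the model must be constructed while an event loop is already running. A minimal sketch of that pattern (assuming GOOGLE_API_KEY is set; this just applies the constraint directly, it is not an official fix):

import asyncio

from langchain_google_genai import ChatGoogleGenerativeAI

async def main() -> None:
    # An event loop is already running here, so the async client can initialize.
    model = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
    response = await model.ainvoke("what is the weather in sf")
    print(response.content)

asyncio.run(main())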

Grecil commented 1 month ago

Any solutions yet?

Grecil commented 1 month ago

@lkuligin @vbarda kindly look into the matter.

s99100532 commented 1 month ago

@Grecil As a temporary workaround, you can use LiteLLM to call the Gemini API; its implementation does not require async_client:

from langchain_community.chat_models import ChatLiteLLM
chat_llm = ChatLiteLLM(
    model="gemini/gemini-pro",
)
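
For reference, a sketch of how this might plug into the failing async calls from the original report (assuming the LiteLLM API key is configured; ChatLiteLLM does not go through the async_client that raises above):

import asyncio

from langchain_community.chat_models import ChatLiteLLM
from langchain_core.output_parsers import StrOutputParser

async def main() -> None:
    chat_llm = ChatLiteLLM(model="gemini/gemini-pro")
    chain = chat_llm | StrOutputParser()
    # astream is one of the methods that fails with ChatGoogleGenerativeAI.
    async for chunk in chain.astream("what is the weather in sf"):
        print(chunk, end="", flush=True)

asyncio.run(main())
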
nehas0326 commented 1 month ago

@s99100532 Why is my Google API key not working with ChatLiteLLM? The same key works with ChatGoogleGenerativeAI. I am getting the following error:

litellm.llms.vertex_httpx.VertexAIError: {
  "error": {
    "code": 400,
    "message": "API key not valid. Please pass a valid API key.",
    "status": "INVALID_ARGUMENT",
    "details": [
      {
        "@type": "type.googleapis.com/google.rpc.ErrorInfo",
        "reason": "API_KEY_INVALID",
        "domain": "googleapis.com",
        "metadata": {
          "service": "generativelanguage.googleapis.com"
        }
      }
    ]
  }
}

Thanks in advance.

Grecil commented 1 month ago

@nehas0326 You need to declare the model like this:

llm=ChatLiteLLM(model="gemini/gemini-1.5-flash")

You are probably declaring the model like this, which pulls the model from Vertex AI instead of the Gemini API:

llm=ChatLiteLLM(model="gemini-1.5-flash")
nehas0326 commented 1 month ago

This is how I have declared it:

llm = ChatLiteLLM(model="gemini/gemini-1.5-pro-latest")

Grecil commented 1 month ago

@nehas0326 what version of litellm are you using? (use "pip list" or "pip freeze")

nehas0326 commented 1 month ago

litellm 1.41.19

Grecil commented 1 month ago

I cannot think of any other reason why LiteLLM would call the Vertex AI API instead of the Gemini API. I am not able to replicate the error you are getting on my device with the same LiteLLM version. You should probably check your code again.

nehas0326 commented 1 month ago

Thanks for trying though.

nehas0326 commented 1 month ago

Created issue

s99100532 commented 1 month ago

@nehas0326 Since LiteLLM uses a different way to connect to the Gemini API, the behaviour is expected to differ. I think it's related to the key setup rather than a code issue.

nehas0326 commented 1 month ago

@Grecil How have you set up your key? I have two keys in the same Google project. I also created a new project in Google Cloud and generated a fresh key for it, but it still throws the same error. :(

Grecil commented 1 month ago

I am running it on a local Windows machine rather than in a virtual environment (not a good practice, I know). I set up the key using this command in the terminal:

setx GOOGLE_API_KEY "insert your key here"
Grecil commented 1 month ago

@nehas0326 You shouldn't share API keys publicly. Unfortunately, I don't have my laptop with me right now. I will get back to you in 2-3 hours when I get home.

nehas0326 commented 1 month ago

I understand about the keys, but I'm in a pickle right now. The error also shows this:

httpx.HTTPStatusError: Client error '400 Bad Request' for url 'https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro:generateContent?key=None'

Looks like the API key is not getting passed to the URL.

s99100532 commented 1 month ago

@nehas0326 Try sharing the secret in a secure way, e.g. via your private repo. Showing a secret in public is terrible practice in the programming world 💩

Grecil commented 1 month ago

@nehas0326 Since you are in a hurry, you can downgrade to langchain_google_genai==1.0.3. Unfortunately, that version does not support system instructions, so you need to apply this commit yourself if you want them: https://github.com/langchain-ai/langchain-google/commit/60331a9418bd6f91fbcbf817fdada8f6f7a27ac5

nehas0326 commented 1 month ago

I think "key=None" in url https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro:generateContent?key=None is creating the error. Is it not able to read my environment variable?

s99100532 commented 1 month ago

https://litellm.vercel.app/docs/providers/gemini

It seems LiteLLM reads the API key from a differently named environment variable.
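
Per those docs, LiteLLM's Gemini provider reads GEMINI_API_KEY rather than GOOGLE_API_KEY. A quick bridge from the existing variable might look like this (a sketch; setting the variable directly in your environment is cleaner):

import os

# LiteLLM's Gemini provider looks for GEMINI_API_KEY, not GOOGLE_API_KEY.
os.environ["GEMINI_API_KEY"] = os.environ["GOOGLE_API_KEY"]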

nehas0326 commented 1 month ago

Worked like magic. Thanks @s99100532 👍

s99100532 commented 1 month ago

I am able to run the async methods by deferring model construction to a RunnableLambda:

from langchain_core.runnables import RunnableLambda
from langchain_google_genai import ChatGoogleGenerativeAI

def create_chat_llm():
    chat_llm_google = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro",
    )
    return chat_llm_google

async def run_llm_async(info):
    return create_chat_llm()

llm = RunnableLambda(run_llm_async)

# Use it as a normal Runnable, e.g.:
# chain = llm | StrOutputParser()
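
Presumably this works because RunnableLambda defers construction: by the time run_llm_async executes, the chain is already being awaited inside a running event loop, so ChatGoogleGenerativeAI can initialize its async client. A usage sketch, assuming llm from above:

import asyncio

from langchain_core.output_parsers import StrOutputParser

async def main() -> None:
    chain = llm | StrOutputParser()
    # A RunnableLambda that returns a Runnable causes that Runnable to be
    # invoked with the original input.
    print(await chain.ainvoke("what is langchain?"))

asyncio.run(main())
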
nehas0326 commented 1 month ago

This works perfectly well @s99100532 💯

azliabdullah commented 1 month ago

Thank you guys for the suggestions and workarounds. While waiting for an official response (will there be one?), I found another issue which is more critical: https://github.com/langchain-ai/langchain-google/issues/369. Kindly let me know if you can reproduce it or have solutions.

azliabdullah commented 1 month ago

I am able to run the async methods by deferring model construction to a RunnableLambda:

def create_chat_llm():
    chat_llm_google = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro",
    )
    return chat_llm_google

async def run_llm_async(info):
    return create_chat_llm()

llm = RunnableLambda(run_llm_async)

# Use it as normal Runnable
# e.g.
# chain = llm | StrOutputParser()

@s99100532 I tried this but received an error. Here is my code:

from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda

def create_chat_llm():
    chat_llm_google = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro",
    )
    return chat_llm_google

async def run_llm_async(info):
    return create_chat_llm()

llm = RunnableLambda(run_llm_async)

# Use it as normal Runnable
# e.g.
chain = llm | StrOutputParser()
chain.ainvoke("what is langchain?")

The error:

RuntimeWarning: coroutine 'RunnableSequence.ainvoke' was never awaited
  chain.ainvoke("what is langchain?")
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
s99100532 commented 1 month ago


https://python.langchain.com/v0.1/docs/expression_language/interface/#async-invoke

https://superfastpython.com/python-async-function/#:~:text=Awaitable%20in%20Python-,How%20to%20Run%20an%20Async%20Function%20From%20Python,IO%20operations%2C%20and%20run%20subprocesses.

You should read the docs (or ask an LLM) about how to use async functions in LangChain/Python.
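
Concretely, the problem in the snippet above is the last line: chain.ainvoke returns a coroutine that is never awaited. A minimal corrected ending, assuming the RunnableLambda setup from that snippet:

import asyncio

# ainvoke returns a coroutine; with no event loop running yet,
# drive it with asyncio.run.
result = asyncio.run(chain.ainvoke("what is langchain?"))
print(result)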

Chengdyc commented 1 month ago

I'm seeing a different error message with the astream and astream_log methods, but the result is still an exception: TypeError: object _StreamingResponseIterator can't be used in 'await' expression.

Code to reproduce the error (this uses top-level async for, so it must run in an async context such as a notebook):

import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.pydantic_v1 import SecretStr
from langchain_google_genai import ChatGoogleGenerativeAI

llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash-latest",
    google_api_key=SecretStr(os.getenv("GOOGLE_API_KEY")),
    client_options=None,
    transport="grpc",
    additional_headers=None,
    client=None,
    async_client=None,
    temperature=0.5,
)

chain = llm | StrOutputParser()

generator = chain.astream("What is the weather")

async for chunk in generator:
    print(chunk)

The last few frames from the stack trace:

File ~/my_workspace/venv/lib/python3.11/site-packages/tenacity/_asyncio.py:61, in AsyncRetrying.__call__(self, fn, *args, **kwargs)
     59 if isinstance(do, DoAttempt):
     60     try:
---> 61         result = await fn(*args, **kwargs)
     62     except BaseException:  # noqa: B902
     63         retry_state.set_exception(sys.exc_info())  # type: ignore[arg-type]

File ~/my_workspace/venv/lib/python3.11/site-packages/langchain_google_genai/chat_models.py:227, in _achat_with_retry.<locals>._achat_with_retry(**kwargs)
    223     raise ChatGoogleGenerativeAIError(
    224         f"Invalid argument provided to Gemini: {e}"
    225     ) from e
    226 except Exception as e:
--> 227     raise e

File ~/my_workspace/venv/lib/python3.11/site-packages/langchain_google_genai/chat_models.py:220, in _achat_with_retry.<locals>._achat_with_retry(**kwargs)
    217 @retry_decorator
    218 async def _achat_with_retry(**kwargs: Any) -> Any:
    219     try:
--> 220         return await generation_method(**kwargs)
    221     except InvalidArgument as e:
    222         # Do not retry for these errors.
    223         raise ChatGoogleGenerativeAIError(
    224             f"Invalid argument provided to Gemini: {e}"
    225         ) from e

TypeError: object _StreamingResponseIterator can't be used in 'await' expression

I'm using langchain-google-genai 1.0.7.

For now, I'm using the LiteLLM workaround proposed by others.

s99100532 commented 1 month ago

@Chengdyc Read the comment above if you are not using asyncio.

Chengdyc commented 1 month ago

Could you elaborate? I understand that ainvoke() returns a coroutine that needs to be awaited; similarly, astream() returns an async generator that we need to iterate with async for.

The following code gives an error as well:

chain = llm | StrOutputParser()

resp = await chain.ainvoke("What is the weather")

The error:

TypeError: object GenerateContentResponse can't be used in 'await' expression
s99100532 commented 1 month ago

@Chengdyc asyncio is a library for running async functions, but it seems you are not using it.
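
For completeness, this is how the astream snippet would normally be driven from a plain script (a sketch assuming chain from Chengdyc's code; it only fixes how the coroutine is run, not the underlying client error):

import asyncio

async def main() -> None:
    # Iterate the async generator inside a running event loop.
    async for chunk in chain.astream("What is the weather"):
        print(chunk, end="", flush=True)

asyncio.run(main())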