Originally posted by **rvasa779** June 13, 2024
### Checked other resources
- [X] I added a very descriptive title to this question.
- [X] I searched the LangChain documentation with the integrated search.
- [X] I used the GitHub search to find a similar question and didn't find it.
### Commit to Help
- [X] I commit to help with one of those options.
### Example Code
```python
import os
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import ConfigurableField
from langchain_google_genai import (
    ChatGoogleGenerativeAI,
    HarmBlockThreshold,
    HarmCategory,
)
from langchain_openai import AzureChatOpenAI
# Setting up APIs
# Azure OpenAI API
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
OPENAI_DEPLOYMENT_ENDPOINT = os.environ["AZURE_OPENAI_ENDPOINT"]
OPENAI_DEPLOYMENT_NAME = os.environ["OPENAI_DEPLOYMENT_NAME"]
OPENAI_DEPLOYMENT_VERSION = os.environ["OPENAI_API_VERSION"]
# Models
llm = AzureChatOpenAI(
    azure_endpoint=OPENAI_DEPLOYMENT_ENDPOINT,
    deployment_name=OPENAI_DEPLOYMENT_NAME,
    openai_api_version=OPENAI_DEPLOYMENT_VERSION,
    openai_api_key=OPENAI_API_KEY,
    verbose=True,
    request_timeout=60,
    temperature=0.8,
).configurable_alternatives(
    # This gives this field an id.
    # When configuring the end runnable, we can then use this id to configure this field.
    ConfigurableField(id="llm"),
    # This sets a default key. If we specify this key, the default LLM
    # (the AzureChatOpenAI initialized above) will be used.
    default_key="azure",
    # This adds a new option named `gemini` that maps to ChatGoogleGenerativeAI.
    gemini=ChatGoogleGenerativeAI(
        model="gemini-pro",
        google_api_key="",  # redacted
        convert_system_message_to_human=True,
        max_tokens=16384,
        temperature=0.7,
        safety_settings={
            HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
            HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
            HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        },
    ),
)
app = FastAPI(title="Joke Engine", version="1.0.0")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
class Joke(BaseModel):
    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")

# Generic home endpoint
@app.get("/")
def home():
    return {"msg": "App is running successfully"}
@app.get("/joke")
async def run_agent(topic: str):
prompt = PromptTemplate(
input_variables=["input",],
template="""
Given a user input, tell a joke about the below topic:
{input}
"""
)
chain = prompt | llm
return chain.with_config(configurable={"llm": "gemini"}).invoke({'input':topic})
```
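For completeness, the error does not appear to depend on FastAPI or Azure Functions: the trace below shows it raised while `langchain_google_genai` parses the model response. A minimal sketch that exercises just the configurable chain, assuming the same `llm` built above and a valid Google API key:
```python
# Minimal repro sketch, assuming the same `llm` defined above via
# configurable_alternatives, with a valid google_api_key filled in.
from langchain.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=["input"],
    template="Given a user input, tell a joke about the below topic:\n{input}",
)
chain = prompt | llm

# Works: routes to the default AzureChatOpenAI alternative.
print(chain.with_config(configurable={"llm": "azure"}).invoke({"input": "cats"}))

# Fails: routes to ChatGoogleGenerativeAI and raises
# AttributeError: 'int' object has no attribute 'name'.
print(chain.with_config(configurable={"llm": "gemini"}).invoke({"input": "cats"}))
```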
### Description
I am using ChatGoogleGenerativeAI alongside AzureChatOpenAI via `configurable_alternatives`.
I have written a simple FastAPI app that tells a joke about a user-given topic. When I run the above code with the `llm` field configured as `azure`, it works fine. However, when I run the same code with `llm` set to `gemini`, it fails with the error below:
```
[2024-06-14T05:30:47.913Z] System.Private.CoreLib: Exception while executing function: Functions.HttpTrigger1. System.Private.CoreLib: Result: Failure
Exception: AttributeError: 'int' object has no attribute 'name'
Stack:
  File "C:\Program Files\Microsoft\Azure Functions Core Tools\workers\python\3.9\WINDOWS\X64\azure_functions_worker\dispatcher.py", line 479, in _handle__invocation_request
    call_result = await self._loop.run_in_executor(
  File "C:\Users\Digital\AppData\Local\Programs\Python\Python39\lib\concurrent\futures\thread.py", line 52, in run
    result = self.fn(*self.args, **self.kwargs)
  File "C:\Program Files\Microsoft\Azure Functions Core Tools\workers\python\3.9\WINDOWS\X64\azure_functions_worker\dispatcher.py", line 752, in _run_sync_func
    return ExtensionManager.get_sync_invocation_wrapper(context,
  File "C:\Program Files\Microsoft\Azure Functions Core Tools\workers\python\3.9\WINDOWS\X64\azure_functions_worker\extension.py", line 215, in _raw_invocation_wrapper
    result = function(**args)
  File "\egg-gemini\HttpTrigger1\__init__.py", line 19, in main
    return func.AsgiMiddleware(app).handle(req, context)
  File "C:\Program Files\Microsoft\Azure Functions Core Tools\workers\python\3.9\WINDOWS\X64\azure\functions\_http_asgi.py", line 172, in handle
    return self._handle(req, context)
  File "C:\Program Files\Microsoft\Azure Functions Core Tools\workers\python\3.9\WINDOWS\X64\azure\functions\_http_asgi.py", line 177, in _handle
    asgi_response = asyncio.run(
  File "C:\Users\Digital\AppData\Local\Programs\Python\Python39\lib\asyncio\runners.py", line 44, in run
    return loop.run_until_complete(main)
  File "C:\Users\Digital\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py", line 642, in run_until_complete
    return future.result()
  File "C:\Program Files\Microsoft\Azure Functions Core Tools\workers\python\3.9\WINDOWS\X64\azure\functions\_http_asgi.py", line 80, in from_app
    await app(scope, res._receive, res._send)
  File "\egg-gemini\.venv\lib\site-packages\fastapi\applications.py", line 1054, in __call__
    await super().__call__(scope, receive, send)
  File "\egg-gemini\.venv\lib\site-packages\starlette\applications.py", line 123, in __call__
    await self.middleware_stack(scope, receive, send)
  File "\egg-gemini\.venv\lib\site-packages\starlette\middleware\errors.py", line 186, in __call__
    raise exc
  File "\egg-gemini\.venv\lib\site-packages\starlette\middleware\errors.py", line 164, in __call__
    await self.app(scope, receive, _send)
  File "\egg-gemini\.venv\lib\site-packages\starlette\middleware\cors.py", line 85, in __call__
    await self.app(scope, receive, send)
  File "\egg-gemini\.venv\lib\site-packages\starlette\middleware\exceptions.py", line 65, in __call__
    await wrap_app_handling_exceptions(self.app, conn)(scope, receive, send)
  File "\egg-gemini\.venv\lib\site-packages\starlette\_exception_handler.py", line 64, in wrapped_app
    raise exc
  File "\egg-gemini\.venv\lib\site-packages\starlette\_exception_handler.py", line 53, in wrapped_app
    await app(scope, receive, sender)
  File "\egg-gemini\.venv\lib\site-packages\starlette\routing.py", line 756, in __call__
    await self.middleware_stack(scope, receive, send)
  File "\egg-gemini\.venv\lib\site-packages\starlette\routing.py", line 776, in app
    await route.handle(scope, receive, send)
  File "\egg-gemini\.venv\lib\site-packages\starlette\routing.py", line 297, in handle
    await self.app(scope, receive, send)
  File "\egg-gemini\.venv\lib\site-packages\starlette\routing.py", line 77, in app
    await wrap_app_handling_exceptions(app, request)(scope, receive, send)
  File "\egg-gemini\.venv\lib\site-packages\starlette\_exception_handler.py", line 64, in wrapped_app
    raise exc
  File "\egg-gemini\.venv\lib\site-packages\starlette\_exception_handler.py", line 53, in wrapped_app
    await app(scope, receive, sender)
  File "\egg-gemini\.venv\lib\site-packages\starlette\routing.py", line 72, in app
    response = await func(request)
  File "\egg-gemini\.venv\lib\site-packages\fastapi\routing.py", line 278, in app
    raw_response = await run_endpoint_function(
  File "\egg-gemini\.venv\lib\site-packages\fastapi\routing.py", line 191, in run_endpoint_function
    return await dependant.call(**values)
  File "\egg-gemini\src\__init__.py", line 100, in run_agent
    return chain.with_config(configurable={"llm": "gemini"}).invoke({'input':topic})
  File "\egg-gemini\.venv\lib\site-packages\langchain_core\runnables\base.py", line 4573, in invoke
    return self.bound.invoke(
  File "\egg-gemini\.venv\lib\site-packages\langchain_core\runnables\base.py", line 2504, in invoke
    input = step.invoke(input, config)
  File "\egg-gemini\.venv\lib\site-packages\langchain_core\runnables\configurable.py", line 117, in invoke
    return runnable.invoke(input, config, **kwargs)
  File "\egg-gemini\.venv\lib\site-packages\langchain_core\language_models\chat_models.py", line 170, in invoke
    self.generate_prompt(
  File "\egg-gemini\.venv\lib\site-packages\langchain_core\language_models\chat_models.py", line 599, in generate_prompt
    return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
  File "\egg-gemini\.venv\lib\site-packages\langchain_core\language_models\chat_models.py", line 456, in generate
    raise e
  File "\egg-gemini\.venv\lib\site-packages\langchain_core\language_models\chat_models.py", line 446, in generate
    self._generate_with_cache(
  File "\egg-gemini\.venv\lib\site-packages\langchain_core\language_models\chat_models.py", line 671, in _generate_with_cache
    result = self._generate(
  File "\egg-gemini\.venv\lib\site-packages\langchain_google_genai\chat_models.py", line 766, in _generate
    return _response_to_result(response)
  File "\egg-gemini\.venv\lib\site-packages\langchain_google_genai\chat_models.py", line 551, in _response_to_result
    generation_info["finish_reason"] = candidate.finish_reason.name
```
My requirements.txt file is attached as well:
[requirements.txt](https://github.com/user-attachments/files/15832125/requirements.txt)
### System Info
System Information
------------------
> OS: Windows
> OS Version: 10.0.19041
> Python Version: 3.9.0 (tags/v3.9.0:9cf6752, Oct 5 2020, 15:34:40) [MSC v.1927 64 bit (AMD64)]
Package Information
-------------------
> langsmith: 0.1.77
> langchain_google_genai: 1.0.6
> langchain_openai: 0.1.8
> langchain_text_splitters: 0.2.1
Packages not installed (Not Necessarily a Problem)
--------------------------------------------------
The following packages were not found:
> langgraph
> langserve
Discussed in https://github.com/langchain-ai/langchain/discussions/22882