Open rohan-uiuc opened 1 year ago
3 agents ALL FAILED with runtime exceptions: runtime_exceptions: ['Traceback (most recent call last):\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/ai_ta_backend/agents/github_agent.py", line 187, in bot_runner_with_retries\n result = bot.run(f"{run_instruction}\n{warning_to_bot}")\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 507, in run\n return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 312, in call\n raise e\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 306, in call\n self._call(inputs, run_manager=run_manager)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/agents/agent.py", line 1127, in _call\n next_step_output = self._take_next_step(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/agents/agent.py", line 924, in _take_next_step\n output = self.agent.plan(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/agents/agent.py", line 536, in plan\n full_output = self.llm_chain.predict(callbacks=callbacks, full_inputs)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/llm.py", line 257, in predict\n return self(kwargs, callbacks=callbacks)[self.output_key]\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 312, in call\n raise e\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 306, in call\n self._call(inputs, run_manager=run_manager)\n File 
"/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/llm.py", line 93, in _call\n response = self.generate([inputs], run_manager=run_manager)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/llm.py", line 103, in generate\n return self.llm.generate_prompt(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 475, in generate_prompt\n return self.generate(prompt_messages, stop=stop, callbacks=callbacks, kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 365, in generate\n raise e\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 355, in generate\n self._generate_with_cache(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 507, in _generate_with_cache\n return self._generate(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/openai.py", line 345, in _generate\n response = self.completion_with_retry(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/openai.py", line 284, in completion_with_retry\n return _completion_with_retry(kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 289, in wrapped_f\n return self(f, *args, *kw)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 379, in call\n do = self.iter(retry_state=retry_state)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 325, in iter\n raise retry_exc.reraise()\n File 
"/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 158, in reraise\n raise self.last_attempt.result()\n File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/concurrent/futures/_base.py", line 437, in result\n return self.get_result()\n File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/concurrent/futures/_base.py", line 389, in get_result\n raise self._exception\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 382, in call\n result = fn(args, kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/openai.py", line 282, in _completion_with_retry\n return self.client.create(kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_resources/chat_completion.py", line 25, in create\n return super().create(args, kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_resources/abstract/engine_apiresource.py", line 155, in create\n response, , api_key = requestor.request(\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_requestor.py", line 299, in request\n resp, got_stream = self._interpret_response(result, stream)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_requestor.py", line 710, in _interpret_response\n self._interpret_response_line(\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_requestor.py", line 775, in _interpret_response_line\n raise self.handle_error_response(\nopenai.error.RateLimitError: Rate limit reached for 10KTPM-200RPM in organization org-UKLZPKkZ2kzEAUEUxXTxkCbN on tokens per min. Limit: 10000 / min. 
Please try again in 6ms. Contact us through our help center at help.openai.com if you continue to have issues.\n', 'Traceback (most recent call last):\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/ai_ta_backend/agents/github_agent.py", line 187, in bot_runner_with_retries\n result = bot.run(f"{run_instruction}\n{warning_to_bot}")\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 507, in run\n return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 312, in call\n raise e\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 306, in call\n self._call(inputs, run_manager=run_manager)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/agents/agent.py", line 1127, in _call\n next_step_output = self._take_next_step(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/agents/agent.py", line 924, in _take_next_step\n output = self.agent.plan(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/agents/agent.py", line 536, in plan\n full_output = self.llm_chain.predict(callbacks=callbacks, full_inputs)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/llm.py", line 257, in predict\n return self(kwargs, callbacks=callbacks)[self.output_key]\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 312, in call\n raise e\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 306, in call\n self._call(inputs, run_manager=run_manager)\n File 
"/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/llm.py", line 93, in _call\n response = self.generate([inputs], run_manager=run_manager)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/llm.py", line 103, in generate\n return self.llm.generate_prompt(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 475, in generate_prompt\n return self.generate(prompt_messages, stop=stop, callbacks=callbacks, kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 365, in generate\n raise e\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 355, in generate\n self._generate_with_cache(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 507, in _generate_with_cache\n return self._generate(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/openai.py", line 345, in _generate\n response = self.completion_with_retry(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/openai.py", line 284, in completion_with_retry\n return _completion_with_retry(kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 289, in wrapped_f\n return self(f, args, kw)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 379, in call\n do = self.iter(retry_state=retry_state)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 325, in iter\n raise retry_exc.reraise()\n File 
"/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 158, in reraise\n raise self.last_attempt.result()\n File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/concurrent/futures/_base.py", line 437, in result\n return self.get_result()\n File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/concurrent/futures/_base.py", line 389, in get_result\n raise self._exception\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 382, in call\n result = fn(args, kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/openai.py", line 282, in _completion_with_retry\n return self.client.create(kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_resources/chat_completion.py", line 25, in create\n return super().create(args, kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_resources/abstract/engine_apiresource.py", line 155, in create\n response, , api_key = requestor.request(\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_requestor.py", line 299, in request\n resp, got_stream = self._interpret_response(result, stream)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_requestor.py", line 710, in _interpret_response\n self._interpret_response_line(\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_requestor.py", line 775, in _interpret_response_line\n raise self.handle_error_response(\nopenai.error.RateLimitError: Rate limit reached for 10KTPM-200RPM in organization org-UKLZPKkZ2kzEAUEUxXTxkCbN on tokens per min. Limit: 10000 / min. 
Please try again in 6ms. Contact us through our help center at help.openai.com if you continue to have issues.\n', 'Traceback (most recent call last):\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/ai_ta_backend/agents/github_agent.py", line 187, in bot_runner_with_retries\n result = bot.run(f"{run_instruction}\n{warning_to_bot}")\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 507, in run\n return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 312, in call\n raise e\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 306, in call\n self._call(inputs, run_manager=run_manager)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/agents/agent.py", line 1127, in _call\n next_step_output = self._take_next_step(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/agents/agent.py", line 924, in _take_next_step\n output = self.agent.plan(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/agents/agent.py", line 536, in plan\n full_output = self.llm_chain.predict(callbacks=callbacks, full_inputs)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/llm.py", line 257, in predict\n return self(kwargs, callbacks=callbacks)[self.output_key]\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 312, in call\n raise e\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/base.py", line 306, in call\n self._call(inputs, run_manager=run_manager)\n File 
"/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/llm.py", line 93, in _call\n response = self.generate([inputs], run_manager=run_manager)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chains/llm.py", line 103, in generate\n return self.llm.generate_prompt(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 475, in generate_prompt\n return self.generate(prompt_messages, stop=stop, callbacks=callbacks, kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 365, in generate\n raise e\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 355, in generate\n self._generate_with_cache(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/base.py", line 507, in _generate_with_cache\n return self._generate(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/openai.py", line 345, in _generate\n response = self.completion_with_retry(\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/openai.py", line 284, in completion_with_retry\n return _completion_with_retry(kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 289, in wrapped_f\n return self(f, *args, kw)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 379, in call\n do = self.iter(retry_state=retry_state)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 325, in iter\n raise retry_exc.reraise()\n File 
"/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 158, in reraise\n raise self.last_attempt.result()\n File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/concurrent/futures/_base.py", line 437, in result\n return self.get_result()\n File "/usr/local/opt/python@3.8/Frameworks/Python.framework/Versions/3.8/lib/python3.8/concurrent/futures/_base.py", line 389, in get_result\n raise self._exception\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/tenacity/init.py", line 382, in call\n result = fn(args, kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ncsa/langchain-improved-agents/libs/langchain/langchain/chat_models/openai.py", line 282, in _completion_with_retry\n return self.client.create(kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_resources/chat_completion.py", line 25, in create\n return super().create(args, kwargs)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_resources/abstract/engine_apiresource.py", line 155, in create\n response, , api_key = requestor.request(\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_requestor.py", line 299, in request\n resp, got_stream = self._interpret_response(result, stream)\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_requestor.py", line 710, in _interpret_response\n self._interpret_response_line(\n File "/Users/rohanmarwaha/IdeaProjects/ai-ta-backend/final_env/lib/python3.8/site-packages/openai/api_requestor.py", line 775, in _interpret_response_line\n raise self.handle_error_response(\nopenai.error.RateLimitError: Rate limit reached for 10KTPM-200RPM in organization org-UKLZPKkZ2kzEAUEUxXTxkCbN on tokens per min. Limit: 10000 / min. 
Please try again in 6ms. Contact us through our help center at help.openai.com if you continue to have issues.\n']
Thanks for opening a new issue! I'll now try to finish this implementation and open a PR for you to review. I'll comment if I get blocked, or request your review if I think I'm successful — so just watch for emails while I work. Please comment if you'd like to give me additional instructions.