tonykipkemboi / ollama_pdf_rag

A demo Jupyter Notebook showcasing a simple local RAG (Retrieval Augmented Generation) pipeline to chat with your PDFs.

chain.invoke(input("")) results in Remote end closed connection without response. #3

Closed: saiaprameya closed this issue 5 months ago

saiaprameya commented 5 months ago

Can you please suggest what's going on here?

I can see the Ollama service running.

I ran this command to confirm the service is fine, and it works: `curl http://localhost:11434/api/generate -d '{"model": "mistral"}'`
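
For reference, the same health check can be scripted from Python before invoking the chain (a minimal sketch, not part of the notebook; it assumes the default port used by the curl command above and Ollama's `/api/tags` endpoint, which lists locally pulled models):

```python
# Quick connectivity check against the local Ollama server.
import requests

BASE_URL = "http://localhost:11434"

# A 200 from /api/tags confirms the server is up and reachable;
# the response body lists the models that have been pulled locally.
resp = requests.get(f"{BASE_URL}/api/tags", timeout=10)
resp.raise_for_status()
models = [m["name"] for m in resp.json().get("models", [])]
print("Ollama is up; local models:", models)
```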

But when I run the last step of your notebook, I get the following error:

```
----> 1 chain.invoke(input(""))

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_core/runnables/base.py:2499, in RunnableSequence.invoke(self, input, config)
--> 2499 input = step.invoke(input, patch_config(config, callbacks=run_manager.get_child(f"seq:step:{i+1}")))

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:158, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
--> 158 self.generate_prompt([self._convert_input(input)], stop=stop, callbacks=config.get("callbacks"), ...).generations[0][0]

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:560, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
--> 560 return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:421, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
--> 421 raise e

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:411, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
--> 411 self._generate_with_cache(m, stop=stop, run_manager=run_managers[i] if run_managers else None, **kwargs)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:632, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
--> 632 result = self._generate(messages, stop=stop, run_manager=run_manager, **kwargs)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_community/chat_models/ollama.py:259, in ChatOllama._generate(self, messages, stop, run_manager, **kwargs)
--> 259 final_chunk = self._chat_stream_with_aggregation(messages, stop=stop, run_manager=run_manager, verbose=self.verbose, **kwargs)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_community/chat_models/ollama.py:190, in ChatOllama._chat_stream_with_aggregation(self, messages, stop, run_manager, verbose, **kwargs)
--> 190 for stream_resp in self._create_chat_stream(messages, stop, **kwargs):

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_community/chat_models/ollama.py:162, in ChatOllama._create_chat_stream(self, messages, stop, **kwargs)
--> 162 yield from self._create_stream(payload=payload, stop=stop, api_url=f"{self.base_url}/api/chat", **kwargs)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_community/llms/ollama.py:231, in _OllamaCommon._create_stream(self, api_url, payload, stop, **kwargs)
--> 231 response = requests.post(url=api_url, headers={...}, json=request_payload, stream=True, timeout=self.timeout)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/requests/api.py:115, in post(url, data, json, **kwargs)
--> 115 return request("post", url, data=data, json=json, **kwargs)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/requests/api.py:59, in request(method, url, **kwargs)
---> 59 return session.request(method=method, url=url, **kwargs)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/requests/sessions.py:589, in Session.request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
--> 589 resp = self.send(prep, **send_kwargs)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/requests/sessions.py:703, in Session.send(self, request, **kwargs)
--> 703 r = adapter.send(request, **kwargs)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/requests/adapters.py:501, in HTTPAdapter.send(self, request, stream, timeout, verify, cert, proxies)
--> 501 raise ConnectionError(err, request=request)

ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
```


tonykipkemboi commented 5 months ago

So this last block of code should give you a text box to enter a prompt/question to ask of the PDF. Try passing the question directly between the quotation marks ("") and let me know if you're still getting the error.
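
In concrete terms, that change looks like this (a sketch; the question string is only a placeholder, so substitute your own):

```python
# Instead of waiting on an input() prompt, pass the question string directly:
chain.invoke("What is the main topic of this PDF?")
```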

saiaprameya commented 5 months ago

Hi Tony, thanks for the reply.

I did pass the question directly last time. I tried again, and here is the backtrace:

```
...: chain.invoke(input(""))
"what is this about"

> /home/qaliavatar/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_community/llms/ollama.py(232)_create_stream()
    231     import ipdb; ipdb.set_trace()
--> 232     response = requests.post(
    233         url=api_url,

ipdb> c
OllamaEmbeddings: 100%|██████████| 1/1 [00:00<00:00,  1.10it/s]
OllamaEmbeddings: 100%|██████████| 1/1 [00:00<00:00, 43.86it/s]
OllamaEmbeddings: 100%|██████████| 1/1 [00:00<00:00, 34.94it/s]
OllamaEmbeddings: 100%|██████████| 1/1 [00:00<00:00, 33.49it/s]
OllamaEmbeddings: 100%|██████████| 1/1 [00:00<00:00, 36.60it/s]

> /home/qaliavatar/anaconda3/envs/privateGPT/lib/python3.11/site-packages/langchain_community/llms/ollama.py(232)_create_stream()
    231     import ipdb; ipdb.set_trace()
--> 232     response = requests.post(
    233         url=api_url,

ipdb> c

RemoteDisconnected                        Traceback (most recent call last)
File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/urllib3/connectionpool.py:715, in HTTPConnectionPool.urlopen(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)
--> 715 httplib_response = self._make_request(conn, method, url, timeout=timeout_obj, body=body, headers=headers, chunked=chunked)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/urllib3/connectionpool.py:467, in HTTPConnectionPool._make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
--> 467 six.raise_from(e, None)

File <string>:3, in raise_from(value, from_value)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/urllib3/connectionpool.py:462, in HTTPConnectionPool._make_request(self, conn, method, url, timeout, chunked, **httplib_request_kw)
--> 462 httplib_response = conn.getresponse()

File ~/anaconda3/envs/privateGPT/lib/python3.11/http/client.py:1390, in HTTPConnection.getresponse(self)
-> 1390 response.begin()

File ~/anaconda3/envs/privateGPT/lib/python3.11/http/client.py:325, in HTTPResponse.begin(self)
--> 325 version, status, reason = self._read_status()

File ~/anaconda3/envs/privateGPT/lib/python3.11/http/client.py:294, in HTTPResponse._read_status(self)
--> 294 raise RemoteDisconnected("Remote end closed connection without" " response")

RemoteDisconnected: Remote end closed connection without response

During handling of the above exception, another exception occurred:

ProtocolError                             Traceback (most recent call last)
File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/requests/adapters.py:486, in HTTPAdapter.send(self, request, stream, timeout, verify, cert, proxies)
--> 486 resp = conn.urlopen(method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, chunked=chunked)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/urllib3/connectionpool.py:799, in HTTPConnectionPool.urlopen(...)
--> 799 retries = retries.increment(method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2])

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/urllib3/util/retry.py:550, in Retry.increment(self, method, url, response, error, _pool, _stacktrace)
--> 550 raise six.reraise(type(error), error, _stacktrace)

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/urllib3/packages/six.py:769, in reraise(tp, value, tb)
--> 769 raise value.with_traceback(tb)

... (then the same urllib3/http.client frames as in the RemoteDisconnected
    traceback above, ending in raise RemoteDisconnected) ...

ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))

During handling of the above exception, another exception occurred:

ConnectionError                           Traceback (most recent call last)
Cell In[6], line 14
      6 prompt = ChatPromptTemplate.from_template(template)
      7 chain = (
      8     {"context": retriever, "question": RunnablePassthrough()}
      9     | prompt
     10     | llm
     11     | StrOutputParser()
     12 )
---> 14 chain.invoke(input(""))

... (the same LangChain/requests frames as in the first traceback, now
    reaching langchain_community/llms/ollama.py:232, where the ipdb
    breakpoint was set) ...

File ~/anaconda3/envs/privateGPT/lib/python3.11/site-packages/requests/adapters.py:501, in HTTPAdapter.send(self, request, stream, timeout, verify, cert, proxies)
--> 501 raise ConnectionError(err, request=request)

ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))
```

tonykipkemboi commented 5 months ago

Thanks for the details. Can you try updating Ollama and see if that resolves the issue?

Also, give this a try if the update does not work: https://stackoverflow.com/questions/77826085/connection-aborted-remotedisconnectedremote-end-closed-connection-without
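After updating, it can also help to confirm that the running server actually restarted on the new build (a sketch; it assumes Ollama's /api/version endpoint, which recent releases expose):

```python
# Check the version reported by the running Ollama server.
import requests

info = requests.get("http://localhost:11434/api/version", timeout=10).json()
print("Ollama server version:", info.get("version"))
```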

saiaprameya commented 5 months ago

Hi Tony, thanks for the input. Updating Ollama fixed the problem.

The Ollama version is now 0.1.32.

Thanks for your help.