I just started using the llama2:7b model. After I send a few messages and then switch to the gemma:7b model (without clearing the context), I get this error:
POST http://127.0.0.1:11434/api/chat net::ERR_CONNECTION_REFUSED
fetchWithHeaders @ ollama.be8fd0da.mjs:69
post @ ollama.be8fd0da.mjs:88
processStreamableRequest @ ollama.be8fd0da.mjs:190
chat @ ollama.be8fd0da.mjs:248
AskGPT @ request.ts:13
handleSend @ chat.tsx:83
handleKeyDown @ chat.tsx:89
eval @ import.mjs:1477
callCallback @ react-dom.development.js:4164
invokeGuardedCallbackDev @ react-dom.development.js:4213
invokeGuardedCallback @ react-dom.development.js:4277
invokeGuardedCallbackAndCatchFirstError @ react-dom.development.js:4291
executeDispatch @ react-dom.development.js:9041
processDispatchQueueItemsInOrder @ react-dom.development.js:9073
processDispatchQueue @ react-dom.development.js:9086
dispatchEventsForPlugins @ react-dom.development.js:9097
eval @ react-dom.development.js:9288
batchedUpdates$1 @ react-dom.development.js:26135
batchedUpdates @ react-dom.development.js:3991
dispatchEventForPluginEventSystem @ react-dom.development.js:9287
dispatchEventWithEnableCapturePhaseSelectiveHydrationWithoutDiscreteEventReplay @ react-dom.development.js:6465
dispatchEvent @ react-dom.development.js:6457
dispatchDiscreteEvent @ react-dom.development.js:6430
Show 20 more frames
Show less
ollama.be8fd0da.mjs:69
Uncaught (in promise) TypeError: Failed to fetch
at fetchWithHeaders (ollama.be8fd0da.mjs:69:10)
at post (ollama.be8fd0da.mjs:88:26)
at Ollama.processStreamableRequest (ollama.be8fd0da.mjs:190:28)
at Ollama.chat (ollama.be8fd0da.mjs:248:17)
at AskGPT (request.ts:13:28)
at handleSend (chat.tsx:83:21)
at handleKeyDown (chat.tsx:89:19)
at eval (import.mjs:1477:9)
at HTMLUnknownElement.callCallback (react-dom.development.js:4164:14)
at Object.invokeGuardedCallbackDev (react-dom.development.js:4213:16)
at invokeGuardedCallback (react-dom.development.js:4277:31)
at invokeGuardedCallbackAndCatchFirstError (react-dom.development.js:4291:25)
at executeDispatch (react-dom.development.js:9041:3)
at processDispatchQueueItemsInOrder (react-dom.development.js:9073:7)
at processDispatchQueue (react-dom.development.js:9086:5)
at dispatchEventsForPlugins (react-dom.development.js:9097:3)
at eval (react-dom.development.js:9288:12)
at batchedUpdates$1 (react-dom.development.js:26135:12)
at batchedUpdates (react-dom.development.js:3991:12)
at dispatchEventForPluginEventSystem (react-dom.development.js:9287:3)
at dispatchEventWithEnableCapturePhaseSelectiveHydrationWithoutDiscreteEventReplay (react-dom.development.js:6465:5)
at dispatchEvent (react-dom.development.js:6457:5)
at dispatchDiscreteEvent (react-dom.development.js:6430:5)
To restate: I had just started using the llama2:7b model, and after sending a few messages, switching to the gemma:7b model (without clearing the context) produces the error above.
Is this expected behavior, or is it a bug?