jonrhall / openai-streaming-hooks

React Hooks for streaming connections to OpenAI APIs

Error handling for when the OpenAI API fails? #2

Open rajatkulkarni95 opened 1 year ago

rajatkulkarni95 commented 1 year ago

Sometimes when the API fails, the response that comes back is something like `{content: '', role: ''}`.

Wrapping `submitQuery` in a try/catch doesn't yield any results. Is there a way to catch these errors and surface them to the end user?
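
For context, here's roughly what I tried (paraphrasing my code; the hook's exact return shape may differ from this sketch):

```tsx
import React from 'react';
import { useChatCompletion } from 'openai-streaming-hooks';

// Roughly the failing approach: submitQuery kicks off the streaming request
// and returns immediately, so a synchronous try/catch never sees the failure.
const Chat = () => {
  const [messages, submitQuery] = useChatCompletion({
    model: 'gpt-3.5-turbo',
    apiKey: 'sk-...', // placeholder
  });

  const handleSend = () => {
    try {
      submitQuery([{ content: 'Hello!', role: 'user' }]);
    } catch (err) {
      // Never reached: the error is thrown later, inside the async
      // stream handler, after submitQuery has already returned.
      console.error(err);
    }
  };

  return <button onClick={handleSend}>Send ({messages.length} messages)</button>;
};
```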

jonrhall commented 1 year ago

@rajatkulkarni95 I just published v2 of the chat Hook; this new version makes it a lot easier to catch and handle errors.

Your feature request should now be possible to implement via an error state that the hook could return. The logic for catching the errors already exists in the hook here, so really it's a matter of representing those errors in the Hook's state.

This project does accept PRs, if this is something you (or anyone else) would be interested in contributing.
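
A rough sketch of how the consuming side could look once an error state exists (the `error` value below is hypothetical, not part of the hook's current return value; it's exactly what such a PR would add):

```tsx
import React from 'react';
import { useChatCompletion } from 'openai-streaming-hooks';

// Hypothetical usage: `error` does not exist yet -- exposing the errors the
// hook already catches internally as React state is the feature requested here.
const Chat = () => {
  const { messages, submitQuery, error } = useChatCompletion({
    model: 'gpt-3.5-turbo',
    apiKey: 'sk-...', // placeholder
  });

  return (
    <div>
      {error && <p role="alert">Request failed: {error.message}</p>}
      {messages.map((msg, i) => (
        <p key={i}>{msg.content}</p>
      ))}
      <button onClick={() => submitQuery([{ content: 'Hi!', role: 'user' }])}>
        Send
      </button>
    </div>
  );
};
```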

ikevin127 commented 7 months ago

Check out my fork if you're interested in adding some error handling; just copy and paste, then adapt it to your needs.

Here's the patch:

```diff
From 0031ed008a0a2f54e06f6514b90bf819232674ac Mon Sep 17 00:00:00 2001
From: Kevin Brian Bader
Date: Mon, 29 Jan 2024 20:30:44 +0200
Subject: [PATCH] error handling, stream reader ff, gpt-4-turbo

---
 src/chat-hook.ts           | 14 ++++--
 src/chat-stream-handler.ts | 99 +++++++++++++++++++++-----------------
 src/types.ts               | 11 ++++-
 3 files changed, 74 insertions(+), 50 deletions(-)

diff --git a/src/chat-hook.ts b/src/chat-hook.ts
index 0f3c2e8..4637870 100644
--- a/src/chat-hook.ts
+++ b/src/chat-hook.ts
@@ -1,6 +1,7 @@
 import React from 'react';
 import {
   getOpenAiRequestOptions,
+  initialErrorPayload,
   openAiStreamingDataHandler,
 } from './chat-stream-handler';
 import type {
@@ -9,6 +10,7 @@ import type {
   ChatMessageParams,
   OpenAIStreamingParams,
   OpenAIChatRole,
+  ErrorPayload,
 } from './types';
 
 const MILLISECONDS_PER_SECOND = 1000;
@@ -30,6 +32,7 @@ const createChatMessage = ({
   content,
   role,
   timestamp: restOfParams.timestamp ?? Date.now(),
+  errorPayload: initialErrorPayload,
   meta: {
     loading: false,
     responseTime: '',
@@ -58,6 +61,8 @@ export const useChatCompletion = (apiParams: OpenAIStreamingParams) => {
 
   // Abort an in-progress streaming response
   const abortResponse = () => {
+    closeStream(Date.now());
+
     if (controller) {
       controller.abort();
       setController(null);
@@ -82,8 +87,8 @@ export const useChatCompletion = (apiParams: OpenAIStreamingParams) => {
   const handleNewData = (chunkContent: string, chunkRole: OpenAIChatRole) => {
     _setMessages(
       updateLastItem((msg) => ({
+        ...msg,
         content: `${msg.content}${chunkContent}`,
-        role: `${msg.role}${chunkRole}` as OpenAIChatRole,
         timestamp: 0,
         meta: {
           ...msg.meta,
@@ -101,7 +106,7 @@
   };
 
   // Handles what happens when the stream of a given completion is finished.
-  const closeStream = (beforeTimestamp: number) => {
+  const closeStream = (beforeTimestamp: number, errorPayload?: ErrorPayload) => {
     // Determine the final timestamp, and calculate the number of seconds the full request took.
     const afterTimestamp = Date.now();
     const diffInSeconds =
@@ -113,6 +118,7 @@
     _setMessages(
       updateLastItem((msg) => ({
         ...msg,
+        ...(errorPayload && { errorPayload }),
         timestamp: afterTimestamp,
         meta: {
           ...msg.meta,
@@ -143,7 +149,7 @@
       ...newMessages.map(createChatMessage),
       createChatMessage({
         content: '',
-        role: '',
+        role: 'assistant',
         timestamp: 0,
         meta: { loading: true },
       }),
@@ -164,7 +170,7 @@
       // Filter out the last message, since technically that is the message that the server will
       // return from this request, we're just storing a placeholder for it ahead of time to signal
       // to the UI something is happening.
-      .filter((m, i) => updatedMessages.length - 1 !== i)
+      .filter((_, i) => updatedMessages.length - 1 !== i)
       // Map the updated message structure to only what the OpenAI API expects.
       .map(officialOpenAIParams),
     signal
diff --git a/src/chat-stream-handler.ts b/src/chat-stream-handler.ts
index 807f398..7671fe2 100644
--- a/src/chat-stream-handler.ts
+++ b/src/chat-stream-handler.ts
@@ -4,6 +4,7 @@ import type {
   FetchRequestOptions,
   OpenAIChatRole,
   OpenAIChatCompletionChunk,
+  ErrorPayload,
 } from './types';
 
 // Converts the OpenAI API params + chat messages list + an optional AbortSignal into a shape that
@@ -28,8 +29,8 @@ export const getOpenAiRequestOptions = (
   signal,
 });
 
+export const initialErrorPayload = { status: 0, message: '', type: '', param: '', code: '' };
 const CHAT_COMPLETIONS_URL = 'https://api.openai.com/v1/chat/completions';
-
 const textDecoder = new TextDecoder('utf-8');
 
 // Takes a set of fetch request options and calls the onIncomingChunk and onCloseStream functions
@@ -37,64 +38,72 @@ const textDecoder = new TextDecoder('utf-8');
 export const openAiStreamingDataHandler = async (
   requestOpts: FetchRequestOptions,
   onIncomingChunk: (contentChunk: string, roleChunk: OpenAIChatRole) => void,
-  onCloseStream: (beforeTimestamp: number) => void
+  onCloseStream: (beforeTimestamp: number, errorPayload?: ErrorPayload) => void
 ) => {
-  // Record the timestamp before the request starts.
   const beforeTimestamp = Date.now();
-
-  // Initiate the completion request
   const response = await fetch(CHAT_COMPLETIONS_URL, requestOpts);
+  let responseJson: { error: ErrorPayload } = { error: initialErrorPayload };
+  let errorPayload = null;
 
-  // If the response isn't OK (non-2XX HTTP code) report the HTTP status and description.
-  if (!response.ok) {
-    throw new Error(
-      `Network response was not ok: ${response.status} - ${response.statusText}`
-    );
-  }
+  // If any api errors occur, close the stream and pass data for error handling
+  if (!response.ok || !response.body) {
+    responseJson = await response.json();
+    errorPayload = { ...(responseJson.error ?? {}), status: response.status };
+    onCloseStream(beforeTimestamp, errorPayload);
 
-  // A response body should always exist, if there isn't one something has gone wrong.
-  if (!response.body) {
-    throw new Error('No body included in POST response object');
+    // If the response isn't OK (non-2XX HTTP code) report the HTTP status and description.
+    if (!response.ok) {
+      throw new Error(
+        `Network response status: ${response.status} - ${errorPayload?.message}`
+      );
+    // A response body should always exist, if there isn't one something has gone wrong.
+    } else {
+      throw new Error('No body included in POST response object.');
+    }
   }
 
-  let content = '';
   let role = '';
+  let content = '';
 
-  for await (const newData of response.body as unknown as NodeJS.ReadableStream) {
-    // Decode the data
-    const decodedData = textDecoder.decode(newData as Buffer);
-    // Split the data into lines to process
-    const lines = decodedData.split(/(\n){2}/);
-    // Parse the lines into chat completion chunks
-    const chunks: OpenAIChatCompletionChunk[] = lines
-      // Remove 'data:' prefix off each line
-      .map((line) => line.replace(/(\n)?^data:\s*/, '').trim())
-      // Remove empty lines and "[DONE]"
-      .filter((line) => line !== '' && line !== '[DONE]')
-      // Parse JSON string
-      .map((line) => JSON.parse(line));
+  const isTrue = true;
+  const reader = response.body.getReader();
 
-    // Process each chunk and send an update to the registered handler.
-    for (const chunk of chunks) {
-      // Avoid empty line after single backtick
-      const contentChunk: string = (
-        chunk.choices[0].delta.content ?? ''
-      ).replace(/^`\s*/, '`');
-      // Most times the chunk won't contain a role, in those cases set the role to ""
-      const roleChunk: OpenAIChatRole = chunk.choices[0].delta.role ?? '';
+  try {
+    while (isTrue) {
+      const { done, value } = await reader.read();
+      if (done) {
+        break;
+      }
 
-      // Assign the new data to the rest of the data already received.
-      content = `${content}${contentChunk}`;
-      role = `${role}${roleChunk}`;
+      const decodedData = textDecoder.decode(value);
+      const lines = decodedData.split(/(\n){2}/);
+      const chunks: OpenAIChatCompletionChunk[] = lines
+        .map((line) => line.replace(/(\n)?^data:\s*/, '').trim())
+        .filter((line) => line !== '' && line !== '[DONE]')
+        .map((line) => JSON.parse(line));
 
-      onIncomingChunk(contentChunk, roleChunk);
-    }
-  }
+      for (const chunk of chunks) {
+        const contentChunk: string = (
+          chunk.choices[0].delta.content ?? ''
+        ).replace(/^`\s*/, '`');
+        const roleChunk: OpenAIChatRole = chunk.choices[0].delta.role ?? '';
 
-  onCloseStream(beforeTimestamp);
+        content = `${content}${contentChunk}`;
+        role = `${role}${roleChunk}`;
+
+        onIncomingChunk(contentChunk, roleChunk);
+      }
+    }
 
-  // Return the fully-assembled chat completion.
-  return { content, role } as OpenAIChatMessage;
+    onCloseStream(beforeTimestamp);
+    return { content, role } as OpenAIChatMessage;
+  } catch (e) {
+    errorPayload = { ...initialErrorPayload, status: response.status, message: e.message };
+    onCloseStream(beforeTimestamp, errorPayload);
+    throw new Error(
+      `500 - Internal server error: ${responseJson.error?.message}`
+    );
+  }
 };
 
 export default openAiStreamingDataHandler;
diff --git a/src/types.ts b/src/types.ts
index 7161234..2c64bd4 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -1,6 +1,6 @@
 export type GPT35Model = 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301';
 
-export type GPT4Model = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-32k' | 'gpt-4-32k-0314';
+export type GPT4Model = 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-32k' | 'gpt-4-32k-0314';
 
 export type OpenAIChatRole = 'user' | 'assistant' | 'system' | '';
 
@@ -36,8 +36,17 @@ export interface ChatMessageParams extends OpenAIChatMessage {
   };
 }
 
+export type ErrorPayload = {
+  status: number;
+  message: string;
+  type: string;
+  param: string;
+  code: string;
+};
+
 export interface ChatMessage extends OpenAIChatMessage {
   timestamp: number;
+  errorPayload: ErrorPayload;
   meta: {
     loading: boolean;
     responseTime: string;
```

Changes from main:

- Error handling: a new `ErrorPayload` type, an `errorPayload` field on every `ChatMessage`, and `closeStream` now accepts an optional error payload, so API and stream errors end up in the hook's message state.
- The stream is now read via `response.body.getReader()` instead of `for await...of`, and the read loop is wrapped in a try/catch.
- `gpt-4-1106-preview` added to `GPT4Model`, and the placeholder assistant message is created with `role: 'assistant'` instead of `''`.
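
On the consuming side, reading the fork's `errorPayload` off the last message looks something like this (field names come from the `ErrorPayload` type in the patch; the hook call itself is paraphrased):

```tsx
import React from 'react';
import { useChatCompletion } from 'openai-streaming-hooks'; // i.e. the fork above

const Chat = () => {
  const { messages } = useChatCompletion({
    model: 'gpt-4-1106-preview',
    apiKey: 'sk-...', // placeholder
  });

  // Every message now carries an errorPayload initialized with status 0, so a
  // non-zero status on the last message means the request or the stream failed
  // and closeStream merged the real payload in.
  const lastMessage = messages[messages.length - 1];
  const failed = lastMessage && lastMessage.errorPayload.status !== 0;

  return (
    <div>
      {messages.map((msg, i) => (
        <p key={i}>{msg.content}</p>
      ))}
      {failed && (
        <p role="alert">
          {lastMessage.errorPayload.status}: {lastMessage.errorPayload.message}
        </p>
      )}
    </div>
  );
};
```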