SpellcraftAI / nextjs-openai

Hooks and components for working with OpenAI streams.
https://nextjs-openai.vercel.app
MIT License
237 stars 18 forks source link

TypeError: Response body object should not be disturbed or locked #20

Open softmarshmallow opened 6 months ago

softmarshmallow commented 6 months ago

On Next.js 14 (Node 18), I'm getting tons of errors as soon as I add a `data` parameter to the hook.

OK

  // Working configuration: streams correctly when no request body (`data`) is supplied.
  const { buffer, refresh, cancel, done } = useTextBuffer({
    url: "/ai/completions/prompt",
    throttle: 100,
    options: {
      method: "POST",
    },
  });

THROWS

  // Failing configuration: adding the `data` field (serialized into the request
  // body by the hook) triggers "Response body object should not be disturbed or
  // locked" on the server — see the stack trace below.
  const { buffer, refresh, cancel, done } = useTextBuffer({
    url: "/ai/completions/prompt",
    throttle: 100,
    data: {
      genre: "genre",
      keywords: "keywords",
      playlist: "playlist",
    },
    options: {
      method: "POST",
    },
  });

Any Ideas?

ai:dev:  ⨯ TypeError: Response body object should not be disturbed or locked
ai:dev:     at extractBody (node:internal/deps/undici/undici:4323:17)
ai:dev:     at new _Request (node:internal/deps/undici/undici:5272:48)
ai:dev:     at new NextRequest (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/web/spec-extension/request.js:33:14)
ai:dev:     at NextRequestAdapter.fromNodeNextRequest (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/web/spec-extension/adapters/next-request.js:94:16)
ai:dev:     at NextRequestAdapter.fromBaseNextRequest (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/web/spec-extension/adapters/next-request.js:70:35)
ai:dev:     at doRender (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/base-server.js:1329:73)
ai:dev:     at cacheEntry.responseCache.get.routeKind (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/base-server.js:1552:34)
ai:dev:     at ResponseCache.get (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/response-cache/index.js:49:26)
ai:dev:     at DevServer.renderToResponseWithComponentsImpl (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/base-server.js:1460:53)
ai:dev:     at /Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/base-server.js:990:121
ai:dev:     at NextTracerImpl.trace (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/lib/trace/tracer.js:104:20)
ai:dev:     at DevServer.renderToResponseWithComponents (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/base-server.js:990:41)
ai:dev:     at DevServer.renderPageComponent (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/base-server.js:1843:35)
ai:dev:     at async DevServer.renderToResponseImpl (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/base-server.js:1881:32)
ai:dev:     at async DevServer.pipeImpl (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/base-server.js:909:25)
ai:dev:     at async NextNodeServer.handleCatchallRenderRequest (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/next-server.js:266:17)
ai:dev:     at async DevServer.handleRequestImpl (/Users/studio/Documents/grida-enterprise/project-lemon/node_modules/.pnpm/next@14.0.4_react-dom@18.2.0_react@18.2.0/node_modules/next/dist/server/base-server.js:805:17)

Server side

import { chat_completions, chat_completions_stream } from "@/lib/openai";
import { NextRequest, NextResponse } from "next/server";

/**
 * App Router route handler: reads the JSON request body, then returns the
 * OpenAI completion stream directly as the response body.
 */
export async function POST(req: NextRequest) {
  // Consume the JSON payload sent by the client hook.
  const payload = await req.json();
  console.log("POST", payload);
  // Pipe the upstream completion stream straight through to the client.
  return new NextResponse(await chat_completions_stream());
}

// App Router route handlers (files exporting `POST`/`GET` functions) configure
// the runtime via a route segment config export. The Pages-Router-style
// `export const config = { runtime: "edge" }` is ignored here, so the handler
// was silently running on the Node server — the stack trace's
// `NextRequestAdapter.fromNodeNextRequest` frame confirms the Node path.
export const runtime = "edge";

Lib

import { OpenAI as OpenAiStreams } from "openai-streams";

/**
 * Opens a streaming GPT-4 chat completion via openai-streams and resolves
 * with the resulting stream.
 */
export async function chat_completions_stream() {
  const completionStream = await OpenAiStreams("chat", {
    model: "gpt-4",
    messages: [{ role: "user", content: "Say this is a test" }],
    max_tokens: 25,
  });
  return completionStream;
}
ctjlewis commented 6 months ago

Please use Vercel's AI SDK. This library was written before it existed and is no longer really maintained except for our own projects.

https://vercel.com/blog/introducing-the-vercel-ai-sdk

softmarshmallow commented 6 months ago

Ah, thanks for the quick response. Alright, I'll check it out — I didn't know that existed.