Elliott-Chong / chatpdf-yt

https://chatpdf-elliott.vercel.app

TypeError: messagesRef.current.concat is not a function #48

Closed · HaroonHakimi closed this 6 months ago

HaroonHakimi commented 6 months ago

I'm getting this issue, but I don't see where `messagesRef` is being used. I have tried changing `initialMessages` to `initialMessages: !Array.isArray(data) || [],` but the AI messages and the user messages don't get saved to the database.
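For reference, `!Array.isArray(data) || []` evaluates to the boolean `true` whenever `data` is not an array (and to `[]`, discarding the fetched messages, when it is), so `useChat` still receives something it cannot call `.concat` on. A minimal guard sketch, assuming `data` can be `undefined` or a non-array while the query is in flight:

```tsx
// Sketch only: pass the fetched messages through when they really are an
// array, and fall back to an empty list otherwise.
const { input, handleInputChange, handleSubmit, messages } = useChat({
  api: "/api/chat",
  body: { chatId },
  initialMessages: Array.isArray(data) ? data : [],
});
```

The full component is below: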

`"use client"; import React from "react"; import { Input } from "./ui/input"; import { useChat } from "ai/react"; import { Button } from "./ui/button"; import { Send } from "lucide-react"; import MessageList from "./MessageList"; import { useQuery } from "@tanstack/react-query"; import axios from "axios"; import { Message } from "ai";

type Props = { chatId: number };

const ChatComponent = ({ chatId }: Props) => { const { data, isLoading } = useQuery({ queryKey: ["chat", chatId], queryFn: async () => { const response = await axios.post<Message[]>("/api/get-messages", { chatId, }); return response.data; }, });

const { input, handleInputChange, handleSubmit, messages } = useChat({ api: "/api/chat", body: { chatId, }, initialMessages: data || [], }); React.useEffect(() => { const messageContainer = document.getElementById("message-container"); if (messageContainer) { messageContainer.scrollTo({ top: messageContainer.scrollHeight, behavior: "smooth", }); } }, [messages]); return ( <div className="relative max-h-screen overflow-scroll" id="message-container"

{/ header /}

Chat

  {/* message list */}
  <MessageList messages={messages} isLoading={isLoading} />

  <form
    onSubmit={handleSubmit}
    className="sticky bottom-0 inset-x-0 px-2 py-4 bg-white"
  >
    <div className="flex">
      <Input
        value={input}
        onChange={handleInputChange}
        placeholder="Ask any question..."
        className="w-full"
      />
      <Button className="bg-blue-600 ml-2">
        <Send className="h-4 w-4" />
      </Button>
    </div>
  </form>
</div>

); };

export default ChatComponent;`

```ts
import { Configuration, OpenAIApi } from "openai-edge";
import { Message, OpenAIStream, StreamingTextResponse } from "ai";
import { getContext } from "@/lib/context";
import { db } from "@/lib/db";
import { chats, messages as _messages } from "@/lib/db/schema";
import { eq } from "drizzle-orm";
import { NextResponse } from "next/server";

export const runtime = "edge";

const config = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
});
const openai = new OpenAIApi(config);

export async function POST(req: Request) {
  try {
    const { messages, chatId } = await req.json();
    const _chats = await db.select().from(chats).where(eq(chats.id, chatId));
    if (_chats.length != 1) {
      return NextResponse.json({ error: "chat not found" }, { status: 404 });
    }
    const fileKey = _chats[0].fileKey;
    const lastMessage = messages[messages.length - 1];
    const context = await getContext(lastMessage.content, fileKey);

    const prompt = {
      role: "system",
      content: `AI assistant is a brand new, powerful, human-like artificial intelligence.
      The traits of AI include expert knowledge, helpfulness, cleverness, and articulateness.
      AI is a well-behaved and well-mannered individual.
      AI is always friendly, kind, and inspiring, and he is eager to provide vivid and thoughtful responses to the user.
      AI has the sum of all knowledge in their brain, and is able to accurately answer nearly any question about any topic in conversation.
      AI assistant is a big fan of Pinecone and Vercel.
      START CONTEXT BLOCK
      ${context}
      END OF CONTEXT BLOCK
      AI assistant will take into account any CONTEXT BLOCK that is provided in a conversation.
      If the context does not provide the answer to the question, the AI assistant will say, "I'm sorry, but I don't know the answer to that question".
      AI assistant will not apologize for previous responses, but instead will indicate that new information was gained.
      AI assistant will not invent anything that is not drawn directly from the context.
      `,
    };

    const response = await openai.createChatCompletion({
      model: "gpt-3.5-turbo",
      messages: [
        prompt,
        ...messages.filter((message: Message) => message.role === "user"),
      ],
      stream: true,
    });
    const stream = OpenAIStream(response, {
      onStart: async () => {
        // save user message into db
        await db.insert(_messages).values({
          chatId,
          content: lastMessage.content,
          role: "user",
        });
      },
      onCompletion: async (completion) => {
        // save ai message into db
        await db.insert(_messages).values({
          chatId,
          content: completion,
          role: "system",
        });
      },
    });
    return new StreamingTextResponse(stream);
  } catch (error) {}
}
```
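Separately, the empty `catch (error) {}` at the end of this route swallows every failure: if `req.json()`, the chat lookup, or the OpenAI call throws, nothing is logged and no error response is sent, which can masquerade as messages silently not being saved. A minimal sketch of surfacing the error (the handler body itself is unchanged from above):

```ts
export async function POST(req: Request) {
  try {
    // ... same handler body as above ...
    return new StreamingTextResponse(stream);
  } catch (error) {
    // Log and return a real error response instead of discarding it.
    console.error("error in /api/chat", error);
    return NextResponse.json(
      { error: "internal server error" },
      { status: 500 }
    );
  }
}
```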

```ts
import {
  integer,
  pgEnum,
  pgTable,
  serial,
  text,
  timestamp,
  varchar,
} from "drizzle-orm/pg-core";

export const userSystemEnum = pgEnum("user_system_enum", ["system", "user"]);

export const chats = pgTable("chats", {
  id: serial("id").primaryKey(),
  pdfName: text("pdf_name").notNull(),
  pdfUrl: text("pdf_url").notNull(),
  createdAt: timestamp("created_at").notNull().defaultNow(),
  userId: varchar("user_id", { length: 256 }).notNull(),
  fileKey: text("file_key").notNull(),
});

export type DrizzleChat = typeof chats.$inferSelect;

export const messages = pgTable("messages", {
  id: serial("id").primaryKey(),
  chatId: integer("chat_id")
    .references(() => chats.id)
    .notNull(),
  content: text("content").notNull(),
  createdAt: timestamp("created_at").notNull().defaultNow(),
  role: userSystemEnum("role").notNull(),
});

export const userSubscriptions = pgTable("user_subscriptions", {
  id: serial("id").primaryKey(),
  userId: varchar("user_id", { length: 256 }).notNull().unique(),
  stripeCustomerId: varchar("stripe_customer_id", { length: 256 })
    .notNull()
    .unique(),
  stripeSubscriptionId: varchar("stripe_subscription_id", { length: 256 })
    .notNull()
    .unique(),
  stripePriceId: varchar("stripe_price_id", { length: 256 }),
  stripeCurrentPeriodEnd: timestamp("stripe_current_period_end"),
});
```
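The `/api/get-messages` route is not shown in this issue, but it is the most likely source of the non-array `data`: if it responds with an object wrapper such as `{ messages: [...] }` rather than the rows themselves, `useChat` throws exactly this `concat` error. A hypothetical sketch of the array-returning shape, using the schema above (the real handler may differ):

```ts
import { db } from "@/lib/db";
import { messages as _messages } from "@/lib/db/schema";
import { eq } from "drizzle-orm";
import { NextResponse } from "next/server";

export async function POST(req: Request) {
  const { chatId } = await req.json();
  const rows = await db
    .select()
    .from(_messages)
    .where(eq(_messages.chatId, chatId));
  // Return the array itself so axios.post<Message[]>(...).data really is
  // Message[]; wrapping it as { messages: rows } reproduces the concat error.
  return NextResponse.json(rows);
}
```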

```ts
import { Pinecone } from "@pinecone-database/pinecone";
import { convertToAscii } from "./utils";
import { getEmbeddings } from "./embeddings";

export async function getMatchesFromEmbeddings(
  embeddings: number[],
  fileKey: string
) {
  const apiKey = process.env.PINECONE_API_KEY;

  const pinecone = new Pinecone({
    apiKey: apiKey!,
  });

  const index = await pinecone.Index("papertalk");

  try {
    const namespace = convertToAscii(fileKey);
    const queryResponse = await index.namespace(namespace).query({
      vector: embeddings,
      topK: 5,
      includeMetadata: true,
    });
    return queryResponse.matches || [];
  } catch (error) {
    console.log("error querying embeddings", error);
    throw error;
  }
}

export async function getContext(query: string, fileKey: string) {
  const queryEmbeddings = await getEmbeddings(query);
  const matches = await getMatchesFromEmbeddings(queryEmbeddings, fileKey);

  const qualifyingDocs = matches.filter(
    (match) => match.score && match.score > 0.7
  );

  type Metadata = { text: string; pageNumber: number };

  const docs = qualifyingDocs.map((match) => (match.metadata as Metadata).text);

  return docs.join("\n").substring(0, 3000);
}
```
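One more mismatch worth flagging: the chat route stores assistant completions with `role: "system"`, and `user_system_enum` has no `"assistant"` member, so rows loaded back through `initialMessages` arrive with a role that `useChat` treats as a system prompt rather than an assistant reply. A hedged sketch of normalizing roles on read, assuming the `messages` table shape above:

```ts
import { Message } from "ai";

// Sketch: map stored DB rows to the Message shape useChat expects.
// In this schema, "system" rows are really assistant replies.
function toUiMessages(
  rows: { id: number; content: string; role: "system" | "user" }[]
): Message[] {
  return rows.map((row) => ({
    id: String(row.id),
    content: row.content,
    role: row.role === "system" ? "assistant" : "user",
  }));
}
```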

HaroonHakimi commented 6 months ago

.