vercel / ai

Build AI-powered applications with React, Svelte, Vue, and Solid
https://sdk.vercel.ai/docs

body for svelte's useCompletion not dynamic #1728

Open xmlking opened 1 month ago

xmlking commented 1 month ago

Description

The body option for useCompletion is only evaluated once, during initialization: body: { text: value }

In my case I need to pass an extra param, e.g. text, to my backend API at api/completion, but text always arrives as undefined on the API side.

The React useCompletion hook seems to work fine; this looks like a bug in the Svelte implementation only.
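
To make the failure mode concrete, here is a reduced sketch of the pattern from the component below (illustrative only, not the SDK internals): the options object is read once when useCompletion is called, so the text field never reflects later changes to value.

<script lang="ts">
  import { useCompletion } from 'ai/svelte';

  export let value = '';

  // The options object below is read once, when the component initializes,
  // so `body` captures the initial `value` and never tracks later edits.
  const { handleSubmit } = useCompletion({
    api: '/api/completion',
    body: { text: value },
  });

  // Reassigning `value` here has no effect on subsequent requests;
  // on the server, `text` still arrives as undefined.
  function handleChange(e: Event) {
    value = (e.target as HTMLTextAreaElement)?.value;
  }
</script>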

To Reproduce

  1. Clone https://github.com/xmlking/spectacular
  2. Set .env, then run: turbo dev --filter=console
  3. Test: http://localhost:5173/magic-spell

Code example

<script lang="ts">
import { getToastStore } from '@skeletonlabs/skeleton';
import { AutoResizeTextarea } from '@spectacular/skeleton/components/auto-resize-textarea';
import { Logger } from '@spectacular/utils';
import { useCompletion } from 'ai/svelte';
import { AlertTriangle, Sparkles } from 'lucide-svelte';
import type { HTMLTextareaAttributes } from 'svelte/elements';
import { fade } from 'svelte/transition';

import { handleMessage } from '$lib/components/layout/toast-manager';
import { onMount } from 'svelte';
import { default as LoaderIcon } from './loader-icon.svelte';

const log = new Logger('experiments:ai:ms:browser');
const toastStore = getToastStore();
const api = '/api/completion';

/* FIXME */
/* eslint-disable @typescript-eslint/no-unused-vars,no-unused-vars */
interface $$Props extends HTMLTextareaAttributes {
  value?: any;
}

export let value = '';

/* eslint-disable @typescript-eslint/no-unused-vars,no-unused-vars */
const { complete, completion, input, isLoading, handleSubmit, error, stop } = useCompletion({
  api,
  body: { text: value },
  onFinish: (_prompt, completion) => {
    value = completion.trim();
  },
  onError: (error) => handleMessage({ type: 'error', message: error.message }, toastStore),
});

// callbacks
function handleChange(e: Event) {
  // if (!$isLoading)
  value = (e.target as HTMLTextAreaElement)?.value;
}

function handleSubmitWrap(e: SubmitEvent) {
  e.preventDefault();
  log.debug({ value });
  handleSubmit(e);
  input.set('');
}

onMount(() => {
  //  complete('some example prompt', {body: {text: 'eeee'}});
});
</script>

<form
  class="flex flex-col items-center"
  on:submit={handleSubmitWrap}
>
  <AutoResizeTextarea
    {...$$props}
    maxRows={100}
    minRows={4}
    disabled={$isLoading}
    value={$isLoading && $completion.length > 0 ? $completion.trim() : value}
    on:change={handleChange}
  />
  <div class="z-10 -mt-5">
    <fieldset disabled={$isLoading} class="input-group input-group-divider grid-cols-[1fr_auto]">
      <input
        type="search"
        class="input"
        placeholder="paraphrase it..."
        bind:value={$input}
        aria-label="Prompt"
        required />
      <button
        type="submit"
        class="variant-filled-secondary"
        aria-label="Submit">
        {#if $isLoading}
          <LoaderIcon />
        {:else}
          <Sparkles />
        {/if}
      </button>
    </fieldset>
  </div>
</form>

My +server.ts:

import { createOpenAI } from '@ai-sdk/openai';
import { error } from '@sveltejs/kit';
import { StreamingTextResponse, streamText } from 'ai';

import { env } from '$env/dynamic/private';
import { limiter } from '$lib/server/limiter/limiter';
import { Logger } from '@spectacular/utils';

const log = new Logger('experiments:ai:completion:server');

// https://sdk.vercel.ai/docs/getting-started/svelte
const openai = createOpenAI({
  organization: env.OPENAI_ORG_ID,
  project: env.OPENAI_PROJECT_ID,
  apiKey: env.OPENAI_API_KEY,
});

// free https://platform.openai.com/docs/guides/rate-limits/free-tier-rate-limits
const model = openai('gpt-3.5-turbo');
const system = `You are a text editor. You will be given a prompt and a text to edit, which may be empty or incomplete.
  Edit the text to match the prompt, and only respond with the full edited version of the text - do not include any other information, context, or explanation.
  If you add on to the text, respond with the full version, not just the new portion. Do not include the prompt or otherwise preface your response.
  Do not enclose the response in quotes.`;

export const POST = async (event) => {
  // ratelimit
  if (await limiter.isLimited(event)) error(429);

  const { request } = event;
  const { text, prompt } = await request.json();
  log.debug({ text, prompt });
  if (!prompt || !text) return new Response('Prompt and text are required', { status: 400 });

  const result = await streamText({ model, system, prompt: `Prompt: ${prompt}\nText: ${text}` });

  return new StreamingTextResponse(result.toAIStream());
};
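
For reference, the handler reads both prompt and text from the JSON body, so a request shaped like the following (hypothetical values, sent from the page above) is what it expects; if text is missing it returns the 400 above:

// Hypothetical request illustrating the payload the POST handler parses.
const res = await fetch('/api/completion', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    prompt: 'paraphrase it in a formal tone', // from the search input
    text: 'the draft currently in the textarea', // the field that arrives undefined today
  }),
});
// res.body is the text stream produced by StreamingTextResponse.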

Additional context

Code problem location: https://github.com/xmlking/spectacular/blob/55872cfeb0207801ac26a1e6f03c63d312033554/apps/console/src/lib/components/magic-spell-textarea/magic-spell-textarea.svelte#L29

zicho commented 3 weeks ago

I have experienced this issue as well. I work around it like this:

import { useChat } from 'ai/svelte';

// `T` is a placeholder for whatever extra payload type you need to send.
let myData: T;

const { input, handleSubmit, messages } = useChat({ sendExtraMessageFields: true });

// Rebuild the body reactively so it always reflects the current `myData`.
$: body = { myData };

async function submitHandler(event: SubmitEvent) {
  event.preventDefault();
  handleSubmit(event, {
    options: {
      body,
    },
  });
}
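
For useCompletion, the analogous workaround would be to pass the body per call rather than at initialization, e.g. via the second argument of complete, as in the commented-out line in the original component. A sketch (unverified against the current Svelte implementation):

// Inside the component's <script lang="ts"> block.
const { complete, input, isLoading } = useCompletion({ api: '/api/completion' });

async function handleSubmitWrap(e: SubmitEvent) {
  e.preventDefault();
  // Send the current `value` with this specific request instead of relying on
  // the static `body` option captured when useCompletion was initialized.
  await complete($input, { body: { text: value } });
  input.set('');
}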