From a9ea81a056e974f5c8333edf28b74f31c72a069d Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 26 Nov 2025 13:16:52 +0000 Subject: [PATCH 1/4] feat: add WebLLM client-side inference support Add support for running AI inference entirely in the browser using WebLLM, without requiring an API key. Users can now select "WebLLM (Local)" from the model dropdown to run inference locally using WebGPU. Key changes: - Add @built-in-ai/web-llm package for Vercel AI SDK integration - Add webllm model option to models.ts with isLocal flag - Create useWebLLMChat hook for client-side chat handling - Create WebLLMChat component for local inference UI - Create ChatWrapper to switch between API and local modes - Add /api/chat/webllm-save endpoint for persisting messages - Add WebLLMStatus component showing download/loading progress - Update entitlements to include webllm model for all users --- app/(chat)/api/chat/webllm-save/route.ts | 71 ++++++++ app/(chat)/chat/[id]/page.tsx | 6 +- app/(chat)/page.tsx | 6 +- components/chat-wrapper.tsx | 62 +++++++ components/chat.tsx | 9 +- components/webllm-chat.tsx | 210 +++++++++++++++++++++++ components/webllm-status.tsx | 193 +++++++++++++++++++++ hooks/use-webllm-chat.ts | 189 ++++++++++++++++++++ lib/ai/entitlements.ts | 4 +- lib/ai/models.ts | 12 ++ lib/ai/webllm-client.ts | 40 +++++ package.json | 1 + pnpm-lock.yaml | 36 +++- 13 files changed, 825 insertions(+), 14 deletions(-) create mode 100644 app/(chat)/api/chat/webllm-save/route.ts create mode 100644 components/chat-wrapper.tsx create mode 100644 components/webllm-chat.tsx create mode 100644 components/webllm-status.tsx create mode 100644 hooks/use-webllm-chat.ts create mode 100644 lib/ai/webllm-client.ts diff --git a/app/(chat)/api/chat/webllm-save/route.ts b/app/(chat)/api/chat/webllm-save/route.ts new file mode 100644 index 0000000000..e51c9bc078 --- /dev/null +++ b/app/(chat)/api/chat/webllm-save/route.ts @@ -0,0 +1,71 @@ +import { z } from "zod"; +import { auth } from "@/app/(auth)/auth"; +import { getChatById, saveChat, saveMessages } from "@/lib/db/queries"; +import { ChatSDKError } from "@/lib/errors"; +import { generateTitleFromUserMessage } from "../../../actions"; + +const webllmSaveSchema = z.object({ + chatId: z.string().uuid(), + messages: z.array( + z.object({ + id: z.string().uuid(), + role: z.enum(["user", "assistant"]), + parts: z.array(z.any()), + createdAt: z.string().or(z.date()).optional(), + }) + ), + visibility: z.enum(["public", "private"]).optional().default("private"), +}); + +export async function POST(request: Request) { + try { + const json = await request.json(); + const { chatId, messages, visibility } = webllmSaveSchema.parse(json); + + const session = await auth(); + + if (!session?.user) { + return new ChatSDKError("unauthorized:chat").toResponse(); + } + + const chat = await getChatById({ id: chatId }); + + if (!chat) { + const userMessage = messages.find((m) => m.role === "user"); + if (userMessage) { + const title = await generateTitleFromUserMessage({ + message: userMessage as any, + }); + + await saveChat({ + id: chatId, + userId: session.user.id, + title, + visibility, + }); + } + } else if (chat.userId !== session.user.id) { + return new ChatSDKError("forbidden:chat").toResponse(); + } + + await saveMessages({ + messages: messages.map((msg) => ({ + id: msg.id, + chatId, + role: msg.role, + parts: msg.parts, + attachments: [], + createdAt: msg.createdAt ? 
new Date(msg.createdAt) : new Date(), + })), + }); + + return Response.json({ success: true }); + } catch (error) { + if (error instanceof ChatSDKError) { + return error.toResponse(); + } + + console.error("Error saving WebLLM messages:", error); + return new ChatSDKError("bad_request:api").toResponse(); + } +} diff --git a/app/(chat)/chat/[id]/page.tsx b/app/(chat)/chat/[id]/page.tsx index f208fac603..475bcdea8b 100644 --- a/app/(chat)/chat/[id]/page.tsx +++ b/app/(chat)/chat/[id]/page.tsx @@ -2,7 +2,7 @@ import { cookies } from "next/headers"; import { notFound, redirect } from "next/navigation"; import { auth } from "@/app/(auth)/auth"; -import { Chat } from "@/components/chat"; +import { ChatWrapper } from "@/components/chat-wrapper"; import { DataStreamHandler } from "@/components/data-stream-handler"; import { DEFAULT_CHAT_MODEL } from "@/lib/ai/models"; import { getChatById, getMessagesByChatId } from "@/lib/db/queries"; @@ -45,7 +45,7 @@ export default async function Page(props: { params: Promise<{ id: string }> }) { if (!chatModelFromCookie) { return ( <> - }) { return ( <> - - - (initialMessages); + + const isWebLLM = isWebLLMModel(currentModelId); + + const handleModelChange = (modelId: string) => { + setCurrentModelId(modelId); + }; + + if (isWebLLM) { + return ( + + ); + } + + return ( + + ); +} diff --git a/components/chat.tsx b/components/chat.tsx index 4380db16a5..fecc641768 100644 --- a/components/chat.tsx +++ b/components/chat.tsx @@ -41,6 +41,7 @@ export function Chat({ isReadonly, autoResume, initialLastContext, + onModelChange, }: { id: string; initialMessages: ChatMessage[]; @@ -49,6 +50,7 @@ export function Chat({ isReadonly: boolean; autoResume: boolean; initialLastContext?: AppUsage; + onModelChange?: (modelId: string) => void; }) { const { visibilityType } = useChatVisibility({ chatId: id, @@ -68,6 +70,11 @@ export function Chat({ currentModelIdRef.current = currentModelId; }, [currentModelId]); + const handleModelChange = (modelId: string) => { + setCurrentModelId(modelId); + onModelChange?.(modelId); + }; + const { messages, setMessages, @@ -182,7 +189,7 @@ export function Chat({ chatId={id} input={input} messages={messages} - onModelChange={setCurrentModelId} + onModelChange={handleModelChange} selectedModelId={currentModelId} selectedVisibilityType={visibilityType} sendMessage={sendMessage} diff --git a/components/webllm-chat.tsx b/components/webllm-chat.tsx new file mode 100644 index 0000000000..cb587244ac --- /dev/null +++ b/components/webllm-chat.tsx @@ -0,0 +1,210 @@ +"use client"; + +import type { UseChatHelpers } from "@ai-sdk/react"; +import { useSearchParams } from "next/navigation"; +import { useCallback, useEffect, useState } from "react"; +import useSWR, { useSWRConfig } from "swr"; +import { unstable_serialize } from "swr/infinite"; +import { ChatHeader } from "@/components/chat-header"; +import { useArtifactSelector } from "@/hooks/use-artifact"; +import { useChatVisibility } from "@/hooks/use-chat-visibility"; +import { useWebLLMChat } from "@/hooks/use-webllm-chat"; +import type { Vote } from "@/lib/db/schema"; +import type { Attachment, ChatMessage } from "@/lib/types"; +import { fetcher } from "@/lib/utils"; +import { Artifact } from "./artifact"; +import { Messages } from "./messages"; +import { MultimodalInput } from "./multimodal-input"; +import { getChatHistoryPaginationKey } from "./sidebar-history"; +import { toast } from "./toast"; +import type { VisibilityType } from "./visibility-selector"; +import { WebLLMStatus } from 
"./webllm-status"; + +export function WebLLMChat({ + id, + initialMessages, + initialChatModel, + initialVisibilityType, + isReadonly, + onModelChange, +}: { + id: string; + initialMessages: ChatMessage[]; + initialChatModel: string; + initialVisibilityType: VisibilityType; + isReadonly: boolean; + onModelChange: (modelId: string) => void; +}) { + const { visibilityType } = useChatVisibility({ + chatId: id, + initialVisibilityType, + }); + + const { mutate } = useSWRConfig(); + const [input, setInput] = useState(""); + const [currentModelId, setCurrentModelId] = useState(initialChatModel); + + const { + messages, + setMessages, + sendMessage, + status, + stop, + modelStatus, + downloadProgress, + error, + } = useWebLLMChat({ + id, + initialMessages, + onFinish: () => { + mutate(unstable_serialize(getChatHistoryPaginationKey)); + }, + onError: (err) => { + toast({ + type: "error", + description: err.message, + }); + }, + }); + + const searchParams = useSearchParams(); + const query = searchParams.get("query"); + + const [hasAppendedQuery, setHasAppendedQuery] = useState(false); + + useEffect(() => { + if (query && !hasAppendedQuery && modelStatus === "available") { + sendMessage({ + role: "user" as const, + parts: [{ type: "text", text: query }], + }); + + setHasAppendedQuery(true); + window.history.replaceState({}, "", `/chat/${id}`); + } + }, [query, sendMessage, hasAppendedQuery, id, modelStatus]); + + const { data: votes } = useSWR( + messages.length >= 2 ? `/api/vote?chatId=${id}` : null, + fetcher + ); + + const [attachments, setAttachments] = useState([]); + const isArtifactVisible = useArtifactSelector((state) => state.isVisible); + + const handleModelChange = (modelId: string) => { + setCurrentModelId(modelId); + onModelChange(modelId); + }; + + // Map WebLLM status to a compatible status for Messages component + const chatStatus = + status === "loading-model" + ? "submitted" + : status === "error" + ? "ready" + : status; + + // Create adapter functions that match the expected UseChatHelpers types + const sendMessageAdapter: UseChatHelpers["sendMessage"] = + useCallback( + (message) => { + if (message && "parts" in message && message.parts) { + sendMessage({ + role: "user", + parts: message.parts, + }); + } + return Promise.resolve(); + }, + [sendMessage] + ); + + const regenerateAdapter: UseChatHelpers["regenerate"] = + useCallback(() => { + // WebLLM doesn't support regeneration in this implementation + return Promise.resolve(); + }, []); + + const stopAdapter = useCallback(() => { + stop(); + return Promise.resolve(); + }, [stop]); + + return ( + <> +
+
+
+
+
+
+
+        {error && (
+
+
+          {error.message}
+
+
+        )}
+
+
+
+
+        {!isReadonly && (
+
+        )}
+
+
+ + + + ); +} diff --git a/components/webllm-status.tsx b/components/webllm-status.tsx new file mode 100644 index 0000000000..5abe489873 --- /dev/null +++ b/components/webllm-status.tsx @@ -0,0 +1,193 @@ +"use client"; + +import { useEffect, useState } from "react"; +import { + checkWebLLMSupport, + type WebLLMAvailability, + type WebLLMProgress, +} from "@/lib/ai/webllm-client"; +import { cn } from "@/lib/utils"; + +interface WebLLMStatusProps { + modelStatus: WebLLMAvailability | "checking" | "loading"; + downloadProgress: WebLLMProgress | null; + className?: string; +} + +export function WebLLMStatus({ + modelStatus, + downloadProgress, + className, +}: WebLLMStatusProps) { + const statusConfig = { + checking: { + label: "Checking browser support...", + color: "text-muted-foreground", + bgColor: "bg-muted", + }, + unavailable: { + label: "WebGPU not supported", + color: "text-destructive", + bgColor: "bg-destructive/10", + }, + downloadable: { + label: "Model needs download", + color: "text-amber-600 dark:text-amber-400", + bgColor: "bg-amber-100 dark:bg-amber-900/20", + }, + downloading: { + label: "Downloading model...", + color: "text-blue-600 dark:text-blue-400", + bgColor: "bg-blue-100 dark:bg-blue-900/20", + }, + loading: { + label: "Loading model...", + color: "text-blue-600 dark:text-blue-400", + bgColor: "bg-blue-100 dark:bg-blue-900/20", + }, + available: { + label: "Running locally", + color: "text-green-600 dark:text-green-400", + bgColor: "bg-green-100 dark:bg-green-900/20", + }, + }; + + const config = statusConfig[modelStatus]; + + return ( +
+
+        {modelStatus === "checking" || modelStatus === "loading" ? (
+
+        ) : modelStatus === "downloading" ? (
+
+        ) : modelStatus === "available" ? (
+
+        ) : modelStatus === "unavailable" ? (
+
+        ) : (
+
+        )}
+        {config.label}
+
+      {(modelStatus === "downloading" || modelStatus === "loading") &&
+        downloadProgress && (
+
+
+
+
+            {Math.round(downloadProgress.progress * 100)}%
+
+        )}
+
+ ); +} + +function LoadingSpinner() { + return ( + + + + + ); +} + +function CheckIcon() { + return ( + + + + ); +} + +function XIcon() { + return ( + + + + ); +} + +function DownloadIcon() { + return ( + + + + ); +} + +export function WebLLMSupportCheck() { + const [supported, setSupported] = useState(null); + + useEffect(() => { + setSupported(checkWebLLMSupport()); + }, []); + + if (supported === null) return null; + + if (!supported) { + return ( +
+ WebGPU not supported: WebLLM requires a + WebGPU-compatible browser like Chrome 113+ or Edge 113+. +
+ ); + } + + return null; +} diff --git a/hooks/use-webllm-chat.ts b/hooks/use-webllm-chat.ts new file mode 100644 index 0000000000..fb2981e561 --- /dev/null +++ b/hooks/use-webllm-chat.ts @@ -0,0 +1,189 @@ +"use client"; + +import { type CoreMessage, streamText, type UIMessage } from "ai"; +import { useCallback, useRef, useState } from "react"; +import { + createWebLLMModel, + getWebLLMAvailability, + type WebLLMAvailability, + type WebLLMProgress, +} from "@/lib/ai/webllm-client"; +import type { ChatMessage } from "@/lib/types"; +import { generateUUID } from "@/lib/utils"; + +type WebLLMChatStatus = + | "ready" + | "submitted" + | "streaming" + | "error" + | "loading-model"; + +interface UseWebLLMChatOptions { + id: string; + initialMessages?: ChatMessage[]; + onFinish?: () => void; + onError?: (error: Error) => void; +} + +interface UseWebLLMChatReturn { + messages: ChatMessage[]; + setMessages: React.Dispatch>; + sendMessage: (message: { role: "user"; parts: ChatMessage["parts"] }) => void; + status: WebLLMChatStatus; + stop: () => void; + modelStatus: WebLLMAvailability | "checking" | "loading"; + downloadProgress: WebLLMProgress | null; + error: Error | null; +} + +export function useWebLLMChat({ + id, + initialMessages = [], + onFinish, + onError, +}: UseWebLLMChatOptions): UseWebLLMChatReturn { + const [messages, setMessages] = useState(initialMessages); + const [status, setStatus] = useState("ready"); + const [modelStatus, setModelStatus] = useState< + WebLLMAvailability | "checking" | "loading" + >("checking"); + const [downloadProgress, setDownloadProgress] = + useState(null); + const [error, setError] = useState(null); + const abortControllerRef = useRef(null); + + const sendMessage = useCallback( + async (message: { role: "user"; parts: ChatMessage["parts"] }) => { + const userMessage: ChatMessage = { + id: generateUUID(), + role: "user", + parts: message.parts, + metadata: { createdAt: new Date().toISOString() }, + }; + + setMessages((prev) => [...prev, userMessage]); + setStatus("loading-model"); + setError(null); + + try { + const availability = await getWebLLMAvailability(); + setModelStatus(availability); + + if (availability === "unavailable") { + throw new Error( + "WebLLM is not supported in this browser. Please use a WebGPU-compatible browser like Chrome or Edge." + ); + } + + if (availability === "downloadable" || availability === "downloading") { + setModelStatus("loading"); + } + + const model = createWebLLMModel((progress) => { + setDownloadProgress(progress); + }); + + setStatus("submitted"); + + const allMessages: CoreMessage[] = [...messages, userMessage].map( + (msg) => ({ + role: msg.role as "user" | "assistant", + content: msg.parts + .filter((p) => p.type === "text") + .map((p) => (p as { type: "text"; text: string }).text) + .join("\n"), + }) + ); + + const assistantMessageId = generateUUID(); + const assistantMessage: ChatMessage = { + id: assistantMessageId, + role: "assistant", + parts: [{ type: "text", text: "" }], + metadata: { createdAt: new Date().toISOString() }, + }; + + setMessages((prev) => [...prev, assistantMessage]); + setStatus("streaming"); + setModelStatus("available"); + + abortControllerRef.current = new AbortController(); + + const result = streamText({ + model, + messages: allMessages, + abortSignal: abortControllerRef.current.signal, + }); + + let fullText = ""; + for await (const chunk of result.textStream) { + fullText += chunk; + setMessages((prev) => + prev.map((msg) => + msg.id === assistantMessageId + ? 
{ + ...msg, + parts: [{ type: "text", text: fullText }], + } + : msg + ) + ); + } + + await saveWebLLMMessages(id, userMessage, { + ...assistantMessage, + parts: [{ type: "text", text: fullText }], + }); + + setStatus("ready"); + onFinish?.(); + } catch (err) { + const error = + err instanceof Error ? err : new Error("Unknown error occurred"); + if (error.name !== "AbortError") { + setError(error); + setStatus("error"); + onError?.(error); + } else { + setStatus("ready"); + } + } + }, + [messages, id, onFinish, onError] + ); + + const stop = useCallback(() => { + abortControllerRef.current?.abort(); + setStatus("ready"); + }, []); + + return { + messages, + setMessages, + sendMessage, + status, + stop, + modelStatus, + downloadProgress, + error, + }; +} + +async function saveWebLLMMessages( + chatId: string, + userMessage: ChatMessage, + assistantMessage: ChatMessage +) { + try { + await fetch("/api/chat/webllm-save", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + chatId, + messages: [userMessage, assistantMessage], + }), + }); + } catch (err) { + console.warn("Failed to save WebLLM messages:", err); + } +} diff --git a/lib/ai/entitlements.ts b/lib/ai/entitlements.ts index 4577974a0f..6dd0b58b4e 100644 --- a/lib/ai/entitlements.ts +++ b/lib/ai/entitlements.ts @@ -12,7 +12,7 @@ export const entitlementsByUserType: Record = { */ guest: { maxMessagesPerDay: 20, - availableChatModelIds: ["chat-model", "chat-model-reasoning"], + availableChatModelIds: ["chat-model", "chat-model-reasoning", "webllm"], }, /* @@ -20,7 +20,7 @@ export const entitlementsByUserType: Record = { */ regular: { maxMessagesPerDay: 100, - availableChatModelIds: ["chat-model", "chat-model-reasoning"], + availableChatModelIds: ["chat-model", "chat-model-reasoning", "webllm"], }, /* diff --git a/lib/ai/models.ts b/lib/ai/models.ts index 5696bb57e7..f8bf347d12 100644 --- a/lib/ai/models.ts +++ b/lib/ai/models.ts @@ -4,6 +4,7 @@ export type ChatModel = { id: string; name: string; description: string; + isLocal?: boolean; }; export const chatModels: ChatModel[] = [ @@ -18,4 +19,15 @@ export const chatModels: ChatModel[] = [ description: "Uses advanced chain-of-thought reasoning for complex problems", }, + { + id: "webllm", + name: "WebLLM (Local)", + description: + "Runs entirely in your browser - no API key required. 
Requires WebGPU.", + isLocal: true, + }, ]; + +export function isWebLLMModel(modelId: string): boolean { + return modelId === "webllm"; +} diff --git a/lib/ai/webllm-client.ts b/lib/ai/webllm-client.ts new file mode 100644 index 0000000000..384978ce9d --- /dev/null +++ b/lib/ai/webllm-client.ts @@ -0,0 +1,40 @@ +"use client"; + +import { + doesBrowserSupportWebLLM, + type WebLLMProgress, + webLLM, +} from "@built-in-ai/web-llm"; + +export const WEBLLM_MODEL_ID = "Llama-3.2-3B-Instruct-q4f16_1-MLC"; + +export type WebLLMAvailability = + | "unavailable" + | "downloadable" + | "downloading" + | "available"; + +export type { WebLLMProgress }; + +export function createWebLLMModel( + onProgress?: (progress: WebLLMProgress) => void +) { + return webLLM(WEBLLM_MODEL_ID, { + initProgressCallback: onProgress, + }); +} + +export function checkWebLLMSupport(): boolean { + if (typeof window === "undefined") return false; + return doesBrowserSupportWebLLM(); +} + +export async function getWebLLMAvailability(): Promise { + if (!checkWebLLMSupport()) { + return "unavailable"; + } + const model = webLLM(WEBLLM_MODEL_ID); + return model.availability(); +} + +export { webLLM }; diff --git a/package.json b/package.json index f85e609051..b6dedaa91c 100644 --- a/package.json +++ b/package.json @@ -22,6 +22,7 @@ "@ai-sdk/provider": "2.0.0", "@ai-sdk/react": "2.0.26", "@ai-sdk/xai": "2.0.13", + "@built-in-ai/web-llm": "^0.3.1", "@codemirror/lang-javascript": "^6.2.2", "@codemirror/lang-python": "^6.1.6", "@codemirror/state": "^6.5.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index fba1d7dd4f..bc84e02e4b 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -20,6 +20,9 @@ importers: '@ai-sdk/xai': specifier: 2.0.13 version: 2.0.13(zod@3.25.76) + '@built-in-ai/web-llm': + specifier: ^0.3.1 + version: 0.3.1(ai@5.0.26(zod@3.25.76)) '@codemirror/lang-javascript': specifier: ^6.2.2 version: 6.2.3 @@ -407,6 +410,11 @@ packages: '@braintree/sanitize-url@7.1.1': resolution: {integrity: sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw==} + '@built-in-ai/web-llm@0.3.1': + resolution: {integrity: sha512-gZBxiz/x/5+3m9SEFn+2t0P2I+GEn/0tDKeDCzUc/PJEQ6a6eIG8SO+RtRgWcxRRshQJihDG7nqEa69Ouw7Juw==} + peerDependencies: + ai: '>=5.0.0' + '@chevrotain/cst-dts-gen@11.0.3': resolution: {integrity: sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==} @@ -1072,6 +1080,9 @@ packages: '@mermaid-js/parser@0.6.3': resolution: {integrity: sha512-lnjOhe7zyHjc+If7yT4zoedx2vo4sHaTmtkl1+or8BRTnCtDmcTpAjpzDSfCZrshM5bCoz0GyidzadJAH1xobA==} + '@mlc-ai/web-llm@0.2.80': + resolution: {integrity: sha512-Hwy1OCsK5cOU4nKr2wIJ2qA1g595PENtO5f2d9Wd/GgFsj5X04uxfaaJfqED8eFAJOpQpn/DirogdEY/yp5jQg==} + '@neondatabase/serverless@0.9.5': resolution: {integrity: sha512-siFas6gItqv6wD/pZnvdu34wEqgG3nSE6zWZdq5j2DEsa+VvX8i/5HXJOo06qrw5axPXn+lGCxeR+NLaSPIXug==} @@ -1148,31 +1159,31 @@ packages: resolution: {integrity: sha512-BdBGhQBh8IjZ2oIIX6F2/Q3LKm/FDDKi6ccYKcBTeilh6SNdNKveDOLk73BkSJjQLJk6qe4Yh+hHw1UPhCDdrg==} engines: {node: '>=14'} peerDependencies: - '@opentelemetry/api': 1.9.0 + '@opentelemetry/api': ^1.3.0 '@opentelemetry/resources@1.30.1': resolution: {integrity: sha512-5UxZqiAgLYGFjS4s9qm5mBVo433u+dSPUFWVWXmLAD4wB65oMCoXaJP1KJa9DIYYMeHu3z4BZcStG3LC593cWA==} engines: {node: '>=14'} peerDependencies: - '@opentelemetry/api': 1.9.0 + '@opentelemetry/api': '>=1.0.0 <1.10.0' '@opentelemetry/sdk-logs@0.57.2': resolution: {integrity: 
sha512-TXFHJ5c+BKggWbdEQ/inpgIzEmS2BGQowLE9UhsMd7YYlUfBQJ4uax0VF/B5NYigdM/75OoJGhAV3upEhK+3gg==} engines: {node: '>=14'} peerDependencies: - '@opentelemetry/api': 1.9.0 + '@opentelemetry/api': '>=1.4.0 <1.10.0' '@opentelemetry/sdk-metrics@1.30.1': resolution: {integrity: sha512-q9zcZ0Okl8jRgmy7eNW3Ku1XSgg3sDLa5evHZpCwjspw7E8Is4K/haRPDJrBcX3YSn/Y7gUvFnByNYEKQNbNog==} engines: {node: '>=14'} peerDependencies: - '@opentelemetry/api': 1.9.0 + '@opentelemetry/api': '>=1.3.0 <1.10.0' '@opentelemetry/sdk-trace-base@1.30.1': resolution: {integrity: sha512-jVPgBbH1gCy2Lb7X0AVQ8XAfgg0pJ4nvl8/IiQA6nxOsPvS+0zMJaFSs2ltXe0J6C8dqjcnpyqINDJmU30+uOg==} engines: {node: '>=14'} peerDependencies: - '@opentelemetry/api': 1.9.0 + '@opentelemetry/api': '>=1.0.0 <1.10.0' '@opentelemetry/semantic-conventions@1.28.0': resolution: {integrity: sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==} @@ -3364,6 +3375,10 @@ packages: lodash.merge@4.6.2: resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + loglevel@1.9.2: + resolution: {integrity: sha512-HgMmCqIJSAKqo68l0rS2AanEWfkxaZ5wNiEFb5ggm08lDs9Xl2KxBlX3PTcaD2chBM1gXAYf491/M2Rv8Jwayg==} + engines: {node: '>= 0.6.0'} + longest-streak@3.1.0: resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==} @@ -4530,6 +4545,11 @@ snapshots: '@braintree/sanitize-url@7.1.1': {} + '@built-in-ai/web-llm@0.3.1(ai@5.0.26(zod@3.25.76))': + dependencies: + '@mlc-ai/web-llm': 0.2.80 + ai: 5.0.26(zod@3.25.76) + '@chevrotain/cst-dts-gen@11.0.3': dependencies: '@chevrotain/gast': 11.0.3 @@ -5020,6 +5040,10 @@ snapshots: dependencies: langium: 3.3.1 + '@mlc-ai/web-llm@0.2.80': + dependencies: + loglevel: 1.9.2 + '@neondatabase/serverless@0.9.5': dependencies: '@types/pg': 8.11.6 @@ -7284,6 +7308,8 @@ snapshots: lodash.merge@4.6.2: {} + loglevel@1.9.2: {} + longest-streak@3.1.0: {} loupe@3.2.1: {} From eeda35d9020cd02f380125a292362a3257f177a0 Mon Sep 17 00:00:00 2001 From: Claude Date: Wed, 26 Nov 2025 13:17:52 +0000 Subject: [PATCH 2/4] style: apply linter formatting changes Auto-formatting applied by biome linter for consistent code style. 
--- app/(chat)/actions.ts | 2 +- app/(chat)/api/history/route.ts | 2 +- app/globals.css | 2 +- components/app-sidebar.tsx | 16 ++- components/data-stream-handler.tsx | 2 +- components/elements/context.tsx | 2 +- components/message.tsx | 5 +- components/multimodal-input.tsx | 22 ++--- components/preview-attachment.tsx | 6 +- components/weather.tsx | 153 ++++++++++++++++++++--------- lib/ai/prompts.ts | 2 +- lib/ai/tools/get-weather.ts | 27 +++-- lib/db/queries.ts | 2 +- tsconfig.tsbuildinfo | 1 + 14 files changed, 160 insertions(+), 84 deletions(-) create mode 100644 tsconfig.tsbuildinfo diff --git a/app/(chat)/actions.ts b/app/(chat)/actions.ts index 5bc9b4c216..dde22902f4 100644 --- a/app/(chat)/actions.ts +++ b/app/(chat)/actions.ts @@ -3,8 +3,8 @@ import { generateText, type UIMessage } from "ai"; import { cookies } from "next/headers"; import type { VisibilityType } from "@/components/visibility-selector"; -import { myProvider } from "@/lib/ai/providers"; import { titlePrompt } from "@/lib/ai/prompts"; +import { myProvider } from "@/lib/ai/providers"; import { deleteMessagesByChatIdAfterTimestamp, getMessageById, diff --git a/app/(chat)/api/history/route.ts b/app/(chat)/api/history/route.ts index 2525a9a1f0..23615e305a 100644 --- a/app/(chat)/api/history/route.ts +++ b/app/(chat)/api/history/route.ts @@ -1,6 +1,6 @@ import type { NextRequest } from "next/server"; import { auth } from "@/app/(auth)/auth"; -import { getChatsByUserId, deleteAllChatsByUserId } from "@/lib/db/queries"; +import { deleteAllChatsByUserId, getChatsByUserId } from "@/lib/db/queries"; import { ChatSDKError } from "@/lib/errors"; export async function GET(request: NextRequest) { diff --git a/app/globals.css b/app/globals.css index 70285a3526..41ec4c9626 100644 --- a/app/globals.css +++ b/app/globals.css @@ -4,7 +4,7 @@ @source '../node_modules/streamdown/dist/index.js'; /* Add KaTeX CSS for math rendering */ -@import 'katex/dist/katex.min.css'; +@import "katex/dist/katex.min.css"; /* custom variant for setting dark mode programmatically */ @custom-variant dark (&:is(.dark, .dark *)); diff --git a/components/app-sidebar.tsx b/components/app-sidebar.tsx index 5210804974..117089e949 100644 --- a/components/app-sidebar.tsx +++ b/components/app-sidebar.tsx @@ -8,7 +8,10 @@ import { toast } from "sonner"; import { useSWRConfig } from "swr"; import { unstable_serialize } from "swr/infinite"; import { PlusIcon, TrashIcon } from "@/components/icons"; -import { SidebarHistory, getChatHistoryPaginationKey } from "@/components/sidebar-history"; +import { + getChatHistoryPaginationKey, + SidebarHistory, +} from "@/components/sidebar-history"; import { SidebarUserNav } from "@/components/sidebar-user-nav"; import { Button } from "@/components/ui/button"; import { @@ -19,7 +22,6 @@ import { SidebarMenu, useSidebar, } from "@/components/ui/sidebar"; -import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"; import { AlertDialog, AlertDialogAction, @@ -30,6 +32,7 @@ import { AlertDialogHeader, AlertDialogTitle, } from "./ui/alert-dialog"; +import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"; export function AppSidebar({ user }: { user: User | undefined }) { const router = useRouter(); @@ -118,13 +121,16 @@ export function AppSidebar({ user }: { user: User | undefined }) { {user && } - + Delete all chats? - This action cannot be undone. This will permanently delete all your - chats and remove them from our servers. + This action cannot be undone. 
This will permanently delete all + your chats and remove them from our servers. diff --git a/components/data-stream-handler.tsx b/components/data-stream-handler.tsx index 65aa0c414f..270b8d2a85 100644 --- a/components/data-stream-handler.tsx +++ b/components/data-stream-handler.tsx @@ -6,7 +6,7 @@ import { artifactDefinitions } from "./artifact"; import { useDataStream } from "./data-stream-provider"; export function DataStreamHandler() { - const { dataStream,setDataStream } = useDataStream(); + const { dataStream, setDataStream } = useDataStream(); const { artifact, setArtifact, setMetadata } = useArtifact(); diff --git a/components/elements/context.tsx b/components/elements/context.tsx index 96af118367..8d5d6db32f 100644 --- a/components/elements/context.tsx +++ b/components/elements/context.tsx @@ -114,7 +114,7 @@ export const Context = ({ className, usage, ...props }: ContextProps) => { className={cn( "inline-flex select-none items-center gap-1 rounded-md text-sm", "cursor-pointer bg-background text-foreground", - "focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 outline-none ring-offset-background", + "outline-none ring-offset-background focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2", className )} type="button" diff --git a/components/message.tsx b/components/message.tsx index 41fd613b98..53e86d1a6c 100644 --- a/components/message.tsx +++ b/components/message.tsx @@ -328,12 +328,9 @@ export const ThinkingMessage = () => {
-
- Thinking... -
+
Thinking...
); }; - diff --git a/components/multimodal-input.tsx b/components/multimodal-input.tsx index 299f140bea..3e78502b60 100644 --- a/components/multimodal-input.tsx +++ b/components/multimodal-input.tsx @@ -231,14 +231,14 @@ function PureMultimodalInput({ }, [setAttachments, uploadFile] ); - + const handlePaste = useCallback( async (event: ClipboardEvent) => { const items = event.clipboardData?.items; if (!items) return; const imageItems = Array.from(items).filter((item) => - item.type.startsWith('image/'), + item.type.startsWith("image/") ); if (imageItems.length === 0) return; @@ -246,7 +246,7 @@ function PureMultimodalInput({ // Prevent default paste behavior for images event.preventDefault(); - setUploadQueue((prev) => [...prev, 'Pasted image']); + setUploadQueue((prev) => [...prev, "Pasted image"]); try { const uploadPromises = imageItems.map(async (item) => { @@ -260,7 +260,7 @@ function PureMultimodalInput({ (attachment) => attachment !== undefined && attachment.url !== undefined && - attachment.contentType !== undefined, + attachment.contentType !== undefined ); setAttachments((curr) => [ @@ -268,13 +268,13 @@ function PureMultimodalInput({ ...(successfullyUploadedAttachments as Attachment[]), ]); } catch (error) { - console.error('Error uploading pasted images:', error); - toast.error('Failed to upload pasted image(s)'); + console.error("Error uploading pasted images:", error); + toast.error("Failed to upload pasted image(s)"); } finally { setUploadQueue([]); } }, - [setAttachments], + [setAttachments] ); // Add paste event listener to textarea @@ -282,8 +282,8 @@ function PureMultimodalInput({ const textarea = textareaRef.current; if (!textarea) return; - textarea.addEventListener('paste', handlePaste); - return () => textarea.removeEventListener('paste', handlePaste); + textarea.addEventListener("paste", handlePaste); + return () => textarea.removeEventListener("paste", handlePaste); }, [handlePaste]); return ( @@ -385,9 +385,9 @@ function PureMultimodalInput({ ) : ( 0} status={status} - data-testid="send-button" > @@ -482,7 +482,7 @@ function PureModelSelectorCompact({ value={selectedModel?.name} > -