Merged
116 changes: 110 additions & 6 deletions bun.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion packages/tools/package.json
@@ -1,7 +1,7 @@
 {
 	"name": "@supermemory/tools",
 	"type": "module",
-	"version": "1.4.01",
+	"version": "1.4.02",
 	"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
 	"scripts": {
 		"build": "tsdown",
157 changes: 86 additions & 71 deletions packages/tools/src/vercel/index.ts
@@ -119,98 +119,113 @@ const wrapVercelLanguageModel = <T extends LanguageModel>(
 		promptTemplate: options?.promptTemplate,
 	})
 
-	const wrappedModel = {
-		...model,
-
-		doGenerate: async (params: LanguageModelCallOptions) => {
-			try {
-				const transformedParams = await transformParamsWithMemory(params, ctx)
-
-				// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
-				const result = await model.doGenerate(transformedParams as any)
-
-				const userMessage = getLastUserMessage(params)
-				if (ctx.addMemory === "always" && userMessage && userMessage.trim()) {
-					const assistantResponseText = extractAssistantResponseText(
-						result.content as unknown[],
-					)
-					saveMemoryAfterResponse(
-						ctx.client,
-						ctx.containerTag,
-						ctx.conversationId,
-						assistantResponseText,
-						params,
-						ctx.logger,
-						ctx.apiKey,
-						ctx.normalizedBaseUrl,
-					)
-				}
-
-				return result
-			} catch (error) {
-				ctx.logger.error("Error generating response", {
-					error: error instanceof Error ? error.message : "Unknown error",
-				})
-				throw error
-			}
-		},
-
-		doStream: async (params: LanguageModelCallOptions) => {
-			let generatedText = ""
-
-			try {
-				const transformedParams = await transformParamsWithMemory(params, ctx)
-
-				const { stream, ...rest } = await model.doStream(
-					// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
-					transformedParams as any,
-				)
-
-				const transformStream = new TransformStream<
-					LanguageModelStreamPart,
-					LanguageModelStreamPart
-				>({
-					transform(chunk, controller) {
-						if (chunk.type === "text-delta") {
-							generatedText += chunk.delta
-						}
-						controller.enqueue(chunk)
-					},
-					flush: async () => {
-						const userMessage = getLastUserMessage(params)
-						if (
-							ctx.addMemory === "always" &&
-							userMessage &&
-							userMessage.trim()
-						) {
-							saveMemoryAfterResponse(
-								ctx.client,
-								ctx.containerTag,
-								ctx.conversationId,
-								generatedText,
-								params,
-								ctx.logger,
-								ctx.apiKey,
-								ctx.normalizedBaseUrl,
-							)
-						}
-					},
-				})
-
-				return {
-					stream: stream.pipeThrough(transformStream),
-					...rest,
-				}
-			} catch (error) {
-				ctx.logger.error("Error streaming response", {
-					error: error instanceof Error ? error.message : "Unknown error",
-				})
-				throw error
-			}
-		},
-	} as T
-
-	return wrappedModel
+	// Proxy keeps prototype/getter fields (e.g. provider, modelId) that `{ ...model }` drops.
+	return new Proxy(model, {
+		get(target, prop, receiver) {
+			if (prop === "doGenerate") {
+				return async (params: LanguageModelCallOptions) => {
+					try {
+						const transformedParams = await transformParamsWithMemory(
+							params,
+							ctx,
+						)
+
+						// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
+						const result = await target.doGenerate(transformedParams as any)
+
+						const userMessage = getLastUserMessage(params)
+						if (
+							ctx.addMemory === "always" &&
+							userMessage &&
+							userMessage.trim()
+						) {
+							const assistantResponseText = extractAssistantResponseText(
+								result.content as unknown[],
+							)
+							saveMemoryAfterResponse(
+								ctx.client,
+								ctx.containerTag,
+								ctx.conversationId,
+								assistantResponseText,
+								params,
+								ctx.logger,
+								ctx.apiKey,
+								ctx.normalizedBaseUrl,
+							)
+						}
+
+						return result
+					} catch (error) {
+						ctx.logger.error("Error generating response", {
+							error: error instanceof Error ? error.message : "Unknown error",
+						})
+						throw error
+					}
+				}
+			}
+
+			if (prop === "doStream") {
+				return async (params: LanguageModelCallOptions) => {
+					let generatedText = ""
+
+					try {
+						const transformedParams = await transformParamsWithMemory(
+							params,
+							ctx,
+						)
+
+						const { stream, ...rest } = await target.doStream(
+							// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
+							transformedParams as any,
+						)
+
+						const transformStream = new TransformStream<
+							LanguageModelStreamPart,
+							LanguageModelStreamPart
+						>({
+							transform(chunk, controller) {
+								if (chunk.type === "text-delta") {
+									generatedText += chunk.delta
+								}
+								controller.enqueue(chunk)
+							},
+							flush: async () => {
+								const userMessage = getLastUserMessage(params)
+								if (
+									ctx.addMemory === "always" &&
+									userMessage &&
+									userMessage.trim()
+								) {
+									saveMemoryAfterResponse(
+										ctx.client,
+										ctx.containerTag,
+										ctx.conversationId,
+										generatedText,
+										params,
+										ctx.logger,
+										ctx.apiKey,
+										ctx.normalizedBaseUrl,
+									)
+								}
+							},
+						})
+
+						return {
+							stream: stream.pipeThrough(transformStream),
+							...rest,
+						}
+					} catch (error) {
+						ctx.logger.error("Error streaming response", {
+							error: error instanceof Error ? error.message : "Unknown error",
+						})
+						throw error
+					}
+				}
+			}
+
+			return Reflect.get(target, prop, receiver)
+		},
+	}) as T
 }
 
 export {
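Reviewer note on the core change above: `{ ...model }` copies only the instance's own enumerable properties, so members defined on the prototype (getters such as `provider` and `modelId`, plus methods) are silently dropped from the wrapped model. The Proxy instead forwards every property read to the original instance and intercepts only `doGenerate` and `doStream`. A minimal sketch of the difference, using a hypothetical `FakeModel` rather than anything from this codebase:

```ts
class FakeModel {
	// Defined on the prototype, so it is not an own enumerable property
	// and `{ ...instance }` will not copy it.
	get modelId() {
		return "fake-model-v1"
	}

	doGenerate() {
		return { text: "hello" }
	}
}

const model = new FakeModel()

const spreadCopy = { ...model }
console.log(spreadCopy.modelId) // undefined: the getter was lost

const proxied = new Proxy(model, {
	get(target, prop, receiver) {
		if (prop === "doGenerate") {
			// Intercept only the method being wrapped; transform params here.
			return () => target.doGenerate()
		}
		// Every other read (getters, methods, fields) falls through intact.
		return Reflect.get(target, prop, receiver)
	},
})
console.log(proxied.modelId) // "fake-model-v1"
```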
22 changes: 6 additions & 16 deletions packages/tools/test/chatapp/app/api/chat/route.ts
@@ -1,27 +1,17 @@
-import { streamText, type ModelMessage } from "ai"
-import { openai } from "@ai-sdk/openai"
-import { withSupermemory } from "../../../../../src/vercel"
+import { gateway, streamText, type ModelMessage } from "ai"
+import { withSupermemory } from "@supermemory/tools/ai-sdk"
 
-const model = withSupermemory(openai("gpt-4"), "user-123", {
+const model = withSupermemory(gateway("google/gemini-2.5-flash"), "user-1", {
 	apiKey: process.env.SUPERMEMORY_API_KEY ?? "",
 	mode: "full",
 	addMemory: "always",
 	conversationId: "chat-session",
 	verbose: true,
 	baseUrl: process.env.SUPERMEMORY_BASE_URL,
 })
 
 export async function POST(req: Request) {
 	const { messages }: { messages: ModelMessage[] } = await req.json()
 
-	// Commented out generateText implementation
-	// const { response } = await generateText({
-	// 	model,
-	// 	system: "You are a helpful assistant.",
-	// 	messages,
-	// })
-	// return Response.json({ messages: response.messages })
-
-	// New streaming implementation
-	const result = await streamText({
+	const result = streamText({
 		model,
 		system: "You are a helpful assistant.",
 		messages,
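Side note on the dropped `await`: `streamText` returns its `StreamTextResult` synchronously in current AI SDK versions, and the provider request runs as the stream is consumed, so awaiting the call was never necessary. A pared-down sketch of this handler's final shape, assuming it ends with the same `toUIMessageStreamResponse()` call the stream route uses:

```ts
import { gateway, streamText, type ModelMessage } from "ai"

const model = gateway("google/gemini-2.5-flash")

export async function POST(req: Request) {
	const { messages }: { messages: ModelMessage[] } = await req.json()

	// No `await` here: streamText returns a StreamTextResult synchronously,
	// and the provider call runs as the response stream is consumed.
	const result = streamText({
		model,
		system: "You are a helpful assistant.",
		messages,
	})

	return result.toUIMessageStreamResponse()
}
```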
3 changes: 1 addition & 2 deletions packages/tools/test/chatapp/app/api/openai-chat/route.ts
@@ -1,6 +1,5 @@
 import { OpenAI } from "openai"
-//import { withSupermemory } from "@supermemory/tools/openai"
-import { withSupermemory } from "../../../../../src/openai"
+import { withSupermemory } from "@supermemory/tools/openai"
 
 export const runtime = "nodejs"
 
51 changes: 40 additions & 11 deletions packages/tools/test/chatapp/app/api/stream/route.ts
@@ -1,22 +1,51 @@
-import { convertToModelMessages, streamText, type UIMessage } from "ai"
-import { openai } from "@ai-sdk/openai"
-import { withSupermemory } from "../../../../../src/vercel"
-
-const model = withSupermemory(openai("gpt-4"), "user-123", {
-	mode: "full",
-	addMemory: "always",
-	conversationId: "chat-session",
-	verbose: true,
+import { convertToModelMessages, gateway, streamText, type UIMessage } from "ai"
+import { withTracing } from "@posthog/ai"
+import { withSupermemory } from "../../../../../src/ai-sdk"
+import { PostHog } from "posthog-node"
+
+const SUPERMEMORY_USER_ID = "user-1"
+
+const gatewayModel = gateway("google/gemini-2.5-flash")
+
+const supermemoryOptions = {
+	apiKey: process.env.SUPERMEMORY_API_KEY ?? "",
+	mode: "full" as const,
+	addMemory: "always" as const,
 	baseUrl: process.env.SUPERMEMORY_BASE_URL,
-})
+}
 
 export async function POST(req: Request) {
 	const { messages }: { messages: UIMessage[] } = await req.json()
 
+	const posthogApiKey = process.env.POSTHOG_API_KEY
+	const phClient = posthogApiKey
+		? new PostHog(posthogApiKey, {
+				host: process.env.POSTHOG_HOST ?? "https://us.i.posthog.com",
+			})
+		: null
+
+	const innerModel = phClient
+		? withTracing(gatewayModel, phClient, {
+				posthogDistinctId: SUPERMEMORY_USER_ID,
+				posthogProperties: { route: "api/stream" },
+			})
+		: gatewayModel
+
+	const model = withSupermemory(
+		innerModel,
+		SUPERMEMORY_USER_ID,
+		supermemoryOptions,
+	)
+
 	const result = streamText({
 		model,
 		system: "You are a helpful assistant.",
-		messages: convertToModelMessages(messages),
+		messages: await convertToModelMessages(messages),
+		onFinish: phClient
+			? async () => {
+					await phClient.shutdown()
+				}
+			: undefined,
 	})
 
 	return result.toUIMessageStreamResponse()
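A design note on the wrapping order in this route: `withSupermemory` wraps the traced model, so its proxy rewrites the params before the traced model runs, and PostHog records the memory-augmented prompt that actually reaches the provider. A simplified sketch of the layering, dropping the route's null checks for the optional PostHog client:

```ts
// Layering: withSupermemory (outer) -> withTracing (inner) -> gateway model.
// The outer proxy transforms params first, so the tracer observes the
// final, memory-augmented prompt that the provider receives.
const traced = withTracing(gatewayModel, phClient, {
	posthogDistinctId: SUPERMEMORY_USER_ID,
})
const model = withSupermemory(traced, SUPERMEMORY_USER_ID, supermemoryOptions)
```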
3 changes: 1 addition & 2 deletions packages/tools/test/chatapp/app/stream/page.tsx
@@ -1,14 +1,13 @@
 "use client"
 
-import { DefaultChatTransport } from "ai"
 import { useChat } from "@ai-sdk/react"
+import { DefaultChatTransport } from "ai"
 import { useState } from "react"
 
 export default function Page() {
 	const [input, setInput] = useState("")
 
 	const { messages, sendMessage, status } = useChat({
-		// @ts-expect-error - Type mismatch between ai and @ai-sdk/react versions
 		transport: new DefaultChatTransport({
 			api: "/api/stream",
 		}),