Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions specification/draft/apps.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -516,6 +516,10 @@ UI iframes can use the following subset of standard MCP protocol messages:

- `resources/read` - Read resource content

**Sampling:**

- `sampling/createMessage` - Request an LLM completion from the host (uses the standard MCP [`CreateMessageRequest`](https://modelcontextprotocol.io/specification/2025-11-25/client/sampling) / `CreateMessageResult` types, including SEP-1577 `tools` / `toolChoice` / `tool_use` content blocks). The host has full discretion over model selection and SHOULD apply rate limiting, cost controls, and user approval (human-in-the-loop). Apps MUST check `hostCapabilities.sampling` before sending this request, and `hostCapabilities.sampling.tools` before including `tools` in the request params.

**Notifications:**

- `notifications/message` - Log messages to host
Expand Down Expand Up @@ -662,6 +666,14 @@ interface HostCapabilities {
};
/** Host accepts log messages. */
logging?: {};
/**
* Host supports LLM sampling (sampling/createMessage) from the view.
* Mirrors MCP ClientCapabilities.sampling so hosts can pass it through.
*/
sampling?: {
/** Host supports tool use via `tools` and `toolChoice` params (SEP-1577). */
tools?: {};
};
/** Sandbox configuration applied by the host. */
sandbox?: {
/** Permissions granted by the host (camera, microphone, geolocation, clipboard-write). */
Expand Down
22 changes: 22 additions & 0 deletions src/app-bridge.examples.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ import type { Transport } from "@modelcontextprotocol/sdk/shared/transport.js";
import {
CallToolResult,
CallToolResultSchema,
CreateMessageRequest,
CreateMessageResult,
ListResourcesResultSchema,
ReadResourceResultSchema,
ListPromptsResultSchema,
Expand Down Expand Up @@ -228,6 +230,26 @@ function AppBridge_oncalltool_forwardToServer(
//#endregion AppBridge_oncalltool_forwardToServer
}

/**
 * Example: Forward sampling requests to your LLM provider.
 *
 * The bridge receives standard MCP `sampling/createMessage` params from the
 * view and hands them to the host's own LLM backend, returning the provider's
 * result unchanged. NOTE(review): per the spec text in apps.mdx, hosts SHOULD
 * apply rate limiting, cost controls, and user approval before sampling — the
 * placeholder comment inside the handler marks where that belongs.
 */
function AppBridge_oncreatesamplingmessage_forwardToLlm(
bridge: AppBridge,
// Provider contract: accepts stock CreateMessageRequest params plus an abort
// signal, resolves to a stock CreateMessageResult.
myLlmProvider: {
complete: (
params: CreateMessageRequest["params"],
opts: { signal: AbortSignal },
) => Promise<CreateMessageResult>;
},
) {
//#region AppBridge_oncreatesamplingmessage_forwardToLlm
bridge.oncreatesamplingmessage = async (params, extra) => {
// Apply rate limiting, user approval, cost controls here
return await myLlmProvider.complete(params, { signal: extra.signal });
};
//#endregion AppBridge_oncreatesamplingmessage_forwardToLlm
}

/**
* Example: Forward list resources requests to the MCP server.
*/
Expand Down
37 changes: 37 additions & 0 deletions src/app-bridge.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -712,6 +712,43 @@ describe("App <-> AppBridge integration", () => {
expect(result.content).toEqual(resultContent);
});

it("oncreatesamplingmessage setter registers handler for sampling/createMessage requests", async () => {
  // Recreate the bridge advertising sampling (with tool support) so the
  // app-side capability check in createSamplingMessage passes.
  bridge = new AppBridge(null, testHostInfo, {
    ...testHostCapabilities,
    sampling: { tools: {} },
  });

  // Capture every params object the handler receives and reply with a
  // canned, fully-populated CreateMessageResult.
  const captured: unknown[] = [];
  bridge.oncreatesamplingmessage = async (params) => {
    captured.push(params);
    return {
      role: "assistant",
      content: { type: "text", text: "Hello from the model" },
      model: "test-model",
      stopReason: "endTurn",
    };
  };

  await bridge.connect(bridgeTransport);
  await app.connect(appTransport);

  // The sampling capability should round-trip to the app during init.
  expect(app.getHostCapabilities()?.sampling?.tools).toEqual({});

  const result = await app.createSamplingMessage({
    messages: [{ role: "user", content: { type: "text", text: "Hi" } }],
    maxTokens: 50,
  });

  // Exactly one request reached the handler, carrying our params, and the
  // handler's result came back to the app unchanged.
  expect(captured).toHaveLength(1);
  expect(captured[0]).toMatchObject({ maxTokens: 50 });
  expect(result.model).toEqual("test-model");
  expect(result.content).toEqual({ type: "text", text: "Hello from the model" });
});

it("ondownloadfile setter registers handler for ui/download-file requests", async () => {
const downloadParams = {
contents: [
Expand Down
47 changes: 47 additions & 0 deletions src/app-bridge.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,10 @@ import {
CallToolRequestSchema,
CallToolResult,
CallToolResultSchema,
CreateMessageRequest,
CreateMessageRequestSchema,
CreateMessageResult,
CreateMessageResultWithTools,
EmptyResult,
Implementation,
ListPromptsRequest,
Expand Down Expand Up @@ -833,6 +837,49 @@ export class AppBridge extends Protocol<
});
}

/**
* Register a handler for LLM sampling requests from the view.
*
* The view sends standard MCP `sampling/createMessage` requests to obtain
* LLM completions via the host's model connection. The host has full
* discretion over which model to use and SHOULD apply rate limiting,
* cost controls, and user approval (human-in-the-loop) before sampling.
*
* Hosts that register this handler SHOULD advertise `sampling` (and
* `sampling.tools` if tool-calling is supported) in
* {@link McpUiHostCapabilities `McpUiHostCapabilities`}.
*
* @param callback - Handler that receives `CreateMessageRequest` params and
* returns a `CreateMessageResult` (or `CreateMessageResultWithTools` when
* `params.tools` was provided)
* - `params` - Standard MCP sampling params (messages, maxTokens, tools, etc.)
* - `extra` - Request metadata (abort signal, session info)
*
* @example Forward to your LLM provider
* ```ts source="./app-bridge.examples.ts#AppBridge_oncreatesamplingmessage_forwardToLlm"
* bridge.oncreatesamplingmessage = async (params, extra) => {
* // Apply rate limiting, user approval, cost controls here
* return await myLlmProvider.complete(params, { signal: extra.signal });
* };
* ```
*
* @see `CreateMessageRequest` from @modelcontextprotocol/sdk for the request type
* @see `CreateMessageResult` / `CreateMessageResultWithTools` from @modelcontextprotocol/sdk for result types
*/
set oncreatesamplingmessage(
  callback: (
    params: CreateMessageRequest["params"],
    extra: RequestHandlerExtra,
  ) => Promise<CreateMessageResult | CreateMessageResultWithTools>,
) {
  // Delegate straight to the protocol-level handler registry. The callback
  // only ever sees the request params plus per-request metadata; it already
  // returns a promise, so no extra async wrapper is needed.
  this.setRequestHandler(CreateMessageRequestSchema, (request, extra) =>
    callback(request.params, extra),
  );
}

/**
* Notify the view that the MCP server's tool list has changed.
*
Expand Down
48 changes: 48 additions & 0 deletions src/app.examples.ts
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,54 @@ async function App_callServerTool_fetchWeather(app: App) {
//#endregion App_callServerTool_fetchWeather
}

/**
 * Example: Simple LLM completion via host sampling.
 *
 * Issues a standard MCP `sampling/createMessage` request through the host's
 * model connection and logs the returned content block. The host picks the
 * model and may apply approval/rate limits before responding.
 */
async function App_createSamplingMessage_simple(app: App) {
//#region App_createSamplingMessage_simple
const result = await app.createSamplingMessage({
messages: [
{
role: "user",
content: { type: "text", text: "Summarize this in one line." },
},
],
maxTokens: 100,
});
console.log(result.content);
//#endregion App_createSamplingMessage_simple
}

/**
 * Example: Agentic loop with tools (requires host sampling.tools capability).
 *
 * Bails out early when the host does not advertise `sampling.tools`;
 * otherwise issues a tool-enabled sampling request. A `stopReason` of
 * "toolUse" signals the model asked to call a tool — the result content may
 * then be an array containing `tool_use` blocks (SEP-1577).
 */
async function App_createSamplingMessage_withTools(
app: App,
messages: import("@modelcontextprotocol/sdk/types.js").SamplingMessage[],
) {
//#region App_createSamplingMessage_withTools
if (!app.getHostCapabilities()?.sampling?.tools) return;

const result = await app.createSamplingMessage({
messages,
maxTokens: 1024,
tools: [
{
name: "get_weather",
description: "Get the current weather",
inputSchema: {
type: "object",
properties: { city: { type: "string" } },
},
},
],
});
if (result.stopReason === "toolUse") {
// result.content may be an array containing tool_use blocks
}
//#endregion App_createSamplingMessage_withTools
}

/**
* Example: Send a text message from user interaction.
*/
Expand Down
92 changes: 91 additions & 1 deletion src/app.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,11 @@ import {
CallToolRequestSchema,
CallToolResult,
CallToolResultSchema,
CreateMessageRequest,
CreateMessageResult,
CreateMessageResultSchema,
CreateMessageResultWithTools,
CreateMessageResultWithToolsSchema,
EmptyResultSchema,
Implementation,
ListToolsRequest,
Expand Down Expand Up @@ -638,7 +643,15 @@ export class App extends Protocol<AppRequest, AppNotification, AppResult> {
* @internal
*/
assertCapabilityForMethod(method: AppRequest["method"]): void {
  // Guard clause: sampling is currently the only app request that requires
  // the host to have advertised a capability during initialization.
  if (method === "sampling/createMessage" && !this._hostCapabilities?.sampling) {
    throw new Error(
      `Host does not support sampling (required for ${method})`,
    );
  }
}

/**
Expand Down Expand Up @@ -739,6 +752,83 @@ export class App extends Protocol<AppRequest, AppNotification, AppResult> {
);
}

/**
* Request an LLM completion from the host (standard MCP `sampling/createMessage`).
*
* Enables the app to use the host's model connection for completions. The host
* has full discretion over which model to select and MAY modify or reject the
* request (human-in-the-loop). Check {@link getHostCapabilities `getHostCapabilities`}`()?.sampling`
* before calling — hosts without this capability will reject the request.
*
* This method reuses the stock MCP `CreateMessageRequest` shape. When `params.tools`
* is provided, the result is parsed with the extended schema that permits
* `stopReason: "toolUse"` and array content containing `tool_use` blocks.
*
* @param params - Standard MCP `CreateMessageRequest` params (messages, maxTokens,
* systemPrompt, temperature, modelPreferences, tools, toolChoice, etc.)
* @param options - Request options (timeout, abort signal)
* @returns `CreateMessageResult` (single content block) or `CreateMessageResultWithTools`
* (array content, may include `tool_use` blocks) depending on whether `tools` was set
*
* @throws {Error} If the host rejects the request or does not support sampling
* @throws {Error} If the request times out or the connection is lost
*
* @example Simple completion
* ```ts source="./app.examples.ts#App_createSamplingMessage_simple"
* const result = await app.createSamplingMessage({
* messages: [
* { role: "user", content: { type: "text", text: "Summarize this in one line." } },
* ],
* maxTokens: 100,
* });
* console.log(result.content);
* ```
*
* @example Agentic loop with tools
* ```ts source="./app.examples.ts#App_createSamplingMessage_withTools"
* if (!app.getHostCapabilities()?.sampling?.tools) return;
*
* const result = await app.createSamplingMessage({
* messages,
* maxTokens: 1024,
* tools: [
* {
* name: "get_weather",
* description: "Get the current weather",
* inputSchema: { type: "object", properties: { city: { type: "string" } } },
* },
* ],
* });
* if (result.stopReason === "toolUse") {
* // result.content may be an array containing tool_use blocks
* }
* ```
*
* @see `CreateMessageRequest` from @modelcontextprotocol/sdk for the request type
* @see `CreateMessageResult` / `CreateMessageResultWithTools` from @modelcontextprotocol/sdk for result types
*/
async createSamplingMessage(
params: CreateMessageRequest["params"] & { tools?: undefined },
options?: RequestOptions,
): Promise<CreateMessageResult>;
async createSamplingMessage(
params: CreateMessageRequest["params"],
options?: RequestOptions,
): Promise<CreateMessageResultWithTools>;
async createSamplingMessage(
params: CreateMessageRequest["params"],
options?: RequestOptions,
): Promise<CreateMessageResult | CreateMessageResultWithTools> {
const resultSchema = params.tools
? CreateMessageResultWithToolsSchema
: CreateMessageResultSchema;
return await this.request(
{ method: "sampling/createMessage", params },
resultSchema,
options,
);
}

/**
* Send a message to the host's chat interface.
*
Expand Down
26 changes: 26 additions & 0 deletions src/generated/schema.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

18 changes: 18 additions & 0 deletions src/generated/schema.ts

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading
Loading