feat: Add sampling support for MCP (#9395)

* squash commits for elicitation support
* add basic mcp support
* update ai plugin version
This commit is contained in:
Kent Wang
2025-12-15 16:29:34 +08:00
committed by GitHub
parent ae5f212404
commit 64a769bb77
23 changed files with 625 additions and 107 deletions

8
package-lock.json generated
View File

@@ -4021,9 +4021,9 @@
"license": "MIT"
},
"node_modules/@kong/insomnia-plugin-ai": {
"version": "1.0.6",
"resolved": "https://npm.pkg.github.com/download/@kong/insomnia-plugin-ai/1.0.6/a140bd09efb5f37d2ab8c4544b4ac20360a45029",
"integrity": "sha512-J8HDW6YdT9yt0Nj/S/40xUdUXaEuCWFvVefrw3k2ohxyCi5VWqjsaVY3D29CvLubkfHZ9hbyUcVdY+b44w3cHA==",
"version": "1.0.7",
"resolved": "https://npm.pkg.github.com/download/@kong/insomnia-plugin-ai/1.0.7/d91cc8d272c355aec747d60d996dab8f0ac24448",
"integrity": "sha512-3o2Q414fF1zRe/i8CHyZXFSSY+gM6sVc6BKpx3URzJhjnKaFmO96pQqcZYX3oM4KFC0yhQsDM2+/EmNOit/9oQ==",
"optional": true,
"dependencies": {
"@apidevtools/swagger-parser": "^10.1.0",
@@ -30292,7 +30292,7 @@
"vite": "^7.1.3"
},
"optionalDependencies": {
"@kong/insomnia-plugin-ai": "^1.0.6",
"@kong/insomnia-plugin-ai": "^1.0.7",
"@kong/insomnia-plugin-external-vault": "0.1.3"
}
},

View File

@@ -201,7 +201,7 @@
"vite": "^7.1.3"
},
"optionalDependencies": {
"@kong/insomnia-plugin-ai": "^1.0.6",
"@kong/insomnia-plugin-ai": "^1.0.7",
"@kong/insomnia-plugin-external-vault": "0.1.3"
},
"dev": {

View File

@@ -102,6 +102,11 @@ export const NOTIFICATIONS_LIST_CHANGED: string[] = [
METHOD_NOTIFICATION_TOOL_LIST_CHANGED,
METHOD_NOTIFICATION_PROMPT_LIST_CHANGED,
];
export const MCP_SERVER_REQUEST_METHODS: string[] = [
METHOD_SAMPLING_CREATE_MESSAGE,
METHOD_ELICITATION_CREATE_MESSAGE,
METHOD_LIST_ROOTS,
];
export type McpServerMethods = (typeof SERVER_METHODS)[number];
export type NotificationMethods = (typeof NOTIFICATION_METHODS)[number];
@@ -146,9 +151,11 @@ export const getMcpMethodFromMessage = (message: JSONRPCMessage): McpMessageEven
}
} else if (ServerRequestSchema.safeParse(message).success) {
const requestMethod = ServerRequestSchema.parse(message).method;
// Do not support any server requests to client including ping, elicitation and sampling
// Support elicitation, sampling and listing roots requests from server
method =
requestMethod === METHOD_LIST_ROOTS || requestMethod === METHOD_ELICITATION_CREATE_MESSAGE
requestMethod === METHOD_ELICITATION_CREATE_MESSAGE ||
requestMethod === METHOD_SAMPLING_CREATE_MESSAGE ||
requestMethod === METHOD_LIST_ROOTS
? requestMethod
: `${unsupportedMethodPrefix}${requestMethod}`;
}

View File

@@ -1,10 +1,12 @@
import { contextBridge, ipcRenderer, webUtils as webUtilities } from 'electron';
import type { LLMBackend, LLMConfig, LLMConfigServiceAPI } from '~/main/llm-config-service';
import type { GenerateMcpSamplingResponseFunction } from '~/plugins/types';
import type { GitServiceAPI } from './main/git-service';
import type { gRPCBridgeAPI } from './main/ipc/grpc';
import type { secretStorageBridgeAPI } from './main/ipc/secret-storage';
import type { AIFeatureNames } from './main/llm-config-service';
import type { CurlBridgeAPI } from './main/network/curl';
import type { McpBridgeAPI } from './main/network/mcp';
import type { SocketIOBridgeAPI } from './main/network/socket-io';
@@ -76,6 +78,7 @@ const mcp: McpBridgeAPI = {
},
client: {
responseElicitationRequest: options => ipcRenderer.send('mcp.client.responseElicitationRequest', options),
responseSamplingRequest: options => ipcRenderer.send('mcp.client.responseSamplingRequest', options),
hasRequestResponded: options => ipcRenderer.invoke('mcp.client.hasRequestResponded', options),
cancelRequest: options => ipcRenderer.invoke('mcp.client.cancelRequest', options),
},
@@ -156,9 +159,8 @@ const llm: LLMConfigServiceAPI = {
ipcRenderer.invoke('llm.updateBackendConfig', backend, config),
getAllConfigurations: () => ipcRenderer.invoke('llm.getAllConfigurations'),
getCurrentConfig: () => ipcRenderer.invoke('llm.getCurrentConfig'),
getAIFeatureEnabled: (feature: 'aiMockServers' | 'aiCommitMessages') =>
ipcRenderer.invoke('llm.getAIFeatureEnabled', feature),
setAIFeatureEnabled: (feature: 'aiMockServers' | 'aiCommitMessages', enabled: boolean) =>
getAIFeatureEnabled: (feature: AIFeatureNames) => ipcRenderer.invoke('llm.getAIFeatureEnabled', feature),
setAIFeatureEnabled: (feature: AIFeatureNames, enabled: boolean) =>
ipcRenderer.invoke('llm.setAIFeatureEnabled', feature, enabled),
};
@@ -257,6 +259,8 @@ const main: Window['main'] = {
),
generateCommitsFromDiff: (input: { diff: string; recent_commits: string }) =>
ipcRenderer.invoke('generateCommitsFromDiff', input),
generateMcpSamplingResponse: (parameters: Parameters<GenerateMcpSamplingResponseFunction>[0]) =>
ipcRenderer.invoke('generateMcpSamplingResponse', parameters),
};
ipcRenderer.on('hidden-browser-window-response-listener', event => {

View File

@@ -24,6 +24,9 @@ export type HandleChannels =
| 'authorizeUserInWindow'
| 'backup'
| 'cancelAuthorizationInDefaultBrowser'
| 'generateMockRouteDataFromSpec'
| 'generateCommitsFromDiff'
| 'generateMcpSamplingResponse'
| 'curl.event.findMany'
| 'curl.open'
| 'curl.readyState'
@@ -180,6 +183,7 @@ export type MainOnChannels =
| 'webSocket.closeAll'
| 'mcp.closeAll'
| 'mcp.client.responseElicitationRequest'
| 'mcp.client.responseSamplingRequest'
| 'mcp.sendMCPRequest'
| 'writeText';

View File

@@ -21,7 +21,12 @@ import { convert } from '~/main/importers/convert';
import { getCurrentConfig, type LLMConfigServiceAPI } from '~/main/llm-config-service';
import { multipartBufferToArray, type Part } from '~/main/multipart-buffer-to-array';
import { insecureReadFile, insecureReadFileWithEncoding, secureReadFile } from '~/main/secure-read-file';
import type { GenerateCommitsFromDiffFunction, MockRouteData, ModelConfig } from '~/plugins/types';
import type {
GenerateCommitsFromDiffFunction,
GenerateMcpSamplingResponseFunction,
MockRouteData,
ModelConfig,
} from '~/plugins/types';
import type { HiddenBrowserWindowBridgeAPI } from '../../entry.hidden-window';
import * as models from '../../models';
@@ -164,6 +169,12 @@ export interface RendererToMainBridgeAPI {
| { commits: Awaited<ReturnType<GenerateCommitsFromDiffFunction>>; error: undefined }
| { commits: undefined; error: string }
>;
generateMcpSamplingResponse: (
input: Parameters<GenerateMcpSamplingResponseFunction>[0],
) => Promise<
| { response: Awaited<ReturnType<GenerateMcpSamplingResponseFunction>>; error: undefined }
| { response: undefined; error: string }
>;
}
export function registerMainHandlers() {
@@ -483,4 +494,63 @@ export function registerMainHandlers() {
});
});
});
ipcMainHandle('generateMcpSamplingResponse', async (_, input: Parameters<GenerateMcpSamplingResponseFunction>[0]) => {
  // Resolve the active LLM configuration up front so we can fail fast.
  // The previous async-executor version called reject() without returning
  // and then forked the worker anyway when no model was configured.
  const modelConfig = (await getCurrentConfig()) as ModelConfig | null;
  if (!modelConfig) {
    throw new Error('No LLM model configured');
  }
  return new Promise((resolve, reject) => {
    // Named distinctly from the global `process` to avoid shadowing.
    const samplingProcess = utilityProcess.fork(path.join(__dirname, 'main/mcp-generate-sampling-response.mjs'));
    // An 'exit' before a 'message' means the worker died without producing a
    // response; translate well-known signal codes into readable errors.
    samplingProcess.on('exit', code => {
      console.log('[mcp-generate-sampling-response-process] exited with code:', code);
      const signals = os.constants.signals;
      let errorMessage: string;
      if (code === 0) {
        errorMessage = 'MCP sampling response generation process exited with code 0.';
      } else if (code === signals.SIGSEGV) {
        errorMessage = 'MCP sampling response generation process crashed with a segmentation fault (SIGSEGV). This may be due to system compatibility when running a GGUF model.';
      } else if (code === signals.SIGKILL) {
        errorMessage = 'MCP sampling response generation process was killed (SIGKILL). This may be due to memory limits or system resources.';
      } else if (code === signals.SIGTERM) {
        errorMessage = 'MCP sampling response generation process was terminated (SIGTERM).';
      } else if (code === signals.SIGABRT) {
        errorMessage = 'MCP sampling response generation process aborted (SIGABRT). This usually indicates an internal error.';
      } else {
        errorMessage = `MCP sampling response generation process exited unexpectedly with code ${code}.`;
      }
      // Resolve (not reject) so the renderer keeps its `{ response, error }`
      // contract for expected failure modes. A no-op if already resolved.
      resolve({ error: errorMessage });
    });
    samplingProcess.on('message', msg => {
      console.log('[mcp-generate-sampling-response-process] received message');
      // The worker reports failures by posting `{ error }` — see
      // mcp-generate-sampling-response.mjs. Surface those as errors instead
      // of wrapping them as successful generated content.
      if (msg && typeof msg === 'object' && 'error' in msg) {
        resolve({ error: String((msg as { error: unknown }).error) });
      } else {
        resolve({
          response: {
            content: msg,
            modelConfig,
          },
        });
      }
      samplingProcess.kill();
    });
    samplingProcess.on('error', err => {
      console.error('[mcp-generate-sampling-response-process] error:', err);
      // Reject with a real Error so the renderer's catch path gets a message.
      reject(err instanceof Error ? err : new Error(String(err)));
    });
    const { systemPrompt, messages, modelConfig: modelConfigFromSamplingRequest } = input;
    // Per-request sampling hints (maxTokens/temperature) override the
    // globally configured model settings.
    samplingProcess.postMessage({
      messages,
      systemPrompt,
      modelConfig: {
        ...modelConfig,
        ...modelConfigFromSamplingRequest,
      },
      aiPluginName: AI_PLUGIN_NAME,
    });
  });
});
}

View File

@@ -23,6 +23,7 @@ export interface LLMConfig {
seed?: boolean;
repeatPenalty?: number;
}
export type AIFeatureNames = 'aiMockServers' | 'aiCommitMessages' | 'aiMcpClient';
export const getActiveBackend = async (): Promise<LLMBackend | null> => {
const active = await models.pluginData.getByKey(LLM_PLUGIN_NAME, 'model.active');
@@ -112,15 +113,12 @@ export const getCurrentConfig = async (): Promise<LLMConfig | null> => {
return { ...config, backend: activeBackend } as LLMConfig;
};
export const getAIFeatureEnabled = async (feature: 'aiMockServers' | 'aiCommitMessages'): Promise<boolean> => {
export const getAIFeatureEnabled = async (feature: AIFeatureNames): Promise<boolean> => {
const data = await models.pluginData.getByKey(LLM_PLUGIN_NAME, `feature.${feature}`);
return data?.value === 'true';
};
export const setAIFeatureEnabled = async (
feature: 'aiMockServers' | 'aiCommitMessages',
enabled: boolean,
): Promise<void> => {
export const setAIFeatureEnabled = async (feature: AIFeatureNames, enabled: boolean): Promise<void> => {
await models.pluginData.upsertByKey(LLM_PLUGIN_NAME, `feature.${feature}`, String(enabled));
trackSegmentEvent(enabled ? SegmentEvent.aiFeatureEnabled : SegmentEvent.aiFeatureDisabled, {

View File

@@ -0,0 +1,20 @@
/* eslint-disable no-undef */
// Electron utility-process worker: loads the optional AI plugin and generates
// one MCP sampling response per message received from the main process.
console.log('[mcp-generate-sampling-response-process] Sampling response generation worker started');
// Surface crashes to the parent as `{ error }` instead of dying silently;
// the parent resolves pending requests based on posted messages.
process.on('uncaughtException', error => {
  console.error('[mcp-generate-sampling-response-process] Uncaught exception:', error);
  process.parentPort.postMessage({ error: error.message });
});
// Each message carries the conversation turns, an optional system prompt, the
// merged model config, and the AI plugin package name. The plugin is imported
// lazily because it is an optional dependency that may not be installed.
process.parentPort.on('message', async ({ data: { messages, systemPrompt, modelConfig, aiPluginName } }) => {
  try {
    const { generateMcpSamplingResponse } = await import(aiPluginName);
    const response = await generateMcpSamplingResponse(messages, systemPrompt, modelConfig);
    console.log('[mcp-generate-sampling-response-process] Successfully generating sampling responses');
    process.parentPort.postMessage(response);
  } catch (error) {
    // Report failures in the same `{ error }` shape the uncaught handler uses.
    const errorMessage = 'Failed to generate mcp sampling response: ' + error.message;
    console.error('[mcp-generate-sampling-response-process]', errorMessage);
    process.parentPort.postMessage({ error: errorMessage });
  }
});

View File

@@ -1,6 +1,7 @@
import {
type CallToolRequest,
CompatibilityCallToolResultSchema,
type CreateMessageResult,
type GetPromptRequest,
type ListPromptsRequest,
type ListResourcesRequest,
@@ -186,3 +187,45 @@ export const responseElicitationRequest = (
mcpServerElicitationRequests.delete(serverRequestId);
}
};
/**
 * Deliver the user's answer for a pending MCP sampling request raised by the
 * server. 'approve' resolves the stored promise with the CreateMessageResult;
 * 'reject' fails the server's request with an Error carrying the reason.
 */
export const responseSamplingRequest = (
  options: CommonMcpOptions &
    (
      | {
          serverRequestId: string;
          type: 'approve';
          result: CreateMessageResult;
        }
      | {
          serverRequestId: string;
          type: 'reject';
          reason: string;
        }
    ),
) => {
  const { serverRequestId, type, requestId } = options;
  const context = getReadyActiveMcpConnectionContext(requestId);
  if (!context) {
    return;
  }
  // Look up the resolver stashed by the CreateMessageRequest handler when the
  // server first issued this sampling request.
  const resolver = context.mcpServerSamplingRequests?.get(serverRequestId);
  if (!resolver) {
    return;
  }
  if (options.type === 'approve') {
    resolver.resolve(options.result);
  } else if (options.type === 'reject') {
    resolver.reject(new Error(options.reason || 'User rejected the sampling request'));
  } else {
    // Unreachable for the declared union, but guards against malformed IPC input.
    throw new Error(`Unknown server request response type: ${type}`);
  }
  context.mcpServerSamplingRequests.delete(serverRequestId);
};

View File

@@ -3,6 +3,8 @@ import path from 'node:path';
import {
type CancelledNotification,
type CreateMessageResult,
CreateMessageResultSchema,
type ElicitResult,
ElicitResultSchema,
JSONRPCErrorSchema,
@@ -15,6 +17,7 @@ import { v4 as uuidV4 } from 'uuid';
import { REALTIME_EVENTS_CHANNELS } from '~/common/constants';
import {
MCP_SERVER_REQUEST_METHODS,
METHOD_ELICITATION_CREATE_MESSAGE,
METHOD_JSONRPC_ERROR,
METHOD_LIST_ROOTS,
@@ -81,6 +84,10 @@ export type ConnectionContext = {
string | number,
{ resolve: (value: ElicitResult) => void; reject: (reason?: any) => void }
>;
mcpServerSamplingRequests: Map<
string | number,
{ resolve: (value: CreateMessageResult) => void; reject: (reason?: any) => void }
>;
mcpRequestAbortControllers: Map<string, AbortController>;
// Abort controller for this specific connection
abortController: AbortController;
@@ -119,6 +126,7 @@ export const createConnectionContext = async (
const pendingEventIds: { jsonRPCId: string; eventId: string; direction: McpEventDirection }[] = [];
const mcpServerElicitationRequests = new Map();
const mcpServerSamplingRequests = new Map();
const mcpRequestAbortControllers = new Map();
@@ -146,6 +154,7 @@ export const createConnectionContext = async (
abortController,
environmentId,
mcpServerElicitationRequests,
mcpServerSamplingRequests,
mcpRequestAbortControllers,
options,
status: 'connecting',
@@ -265,10 +274,7 @@ export const writeEventLogAndNotify = (
const isUnsupportedMethod = eventMethod.startsWith(unsupportedMethodPrefix);
// for server response with error like { method: 'JSON-RPC Error', type: 'message', data: {…}}
const isJsonRPCError = eventMethod === METHOD_JSONRPC_ERROR;
const isServerRequest =
eventMethod === METHOD_ELICITATION_CREATE_MESSAGE ||
eventMethod === METHOD_SAMPLING_CREATE_MESSAGE ||
eventMethod === METHOD_LIST_ROOTS;
const isServerRequest = MCP_SERVER_REQUEST_METHODS.includes(eventMethod);
if (eventMethod === METHOD_NOTIFICATION_CANCELLED) {
// find the cancelled notification message indicates cancellation of the request
removePendingEvent(e => e.jsonRPCId === (data as CancelledNotification).params.requestId);
@@ -319,6 +325,8 @@ export const parseAndLogMcpRequest = (context: ConnectionContext, message: any)
requestMethod = METHOD_LIST_ROOTS;
} else if (ElicitResultSchema.safeParse(message?.result).success) {
requestMethod = METHOD_ELICITATION_CREATE_MESSAGE;
} else if (CreateMessageResultSchema.safeParse(message?.result).success) {
requestMethod = METHOD_SAMPLING_CREATE_MESSAGE;
} else if (JSONRPCErrorSchema.safeParse(message).success) {
requestMethod = METHOD_JSONRPC_ERROR;
} else {
@@ -380,10 +388,12 @@ export const hasRequestResponded = async ({
}: CommonMcpOptions & { serverRequestId: string }) => {
const hasResponded = true;
const context = getReadyActiveMcpConnectionContext(requestId);
const pendingServerRequestResolvers = context?.mcpServerElicitationRequests;
if (pendingServerRequestResolvers) {
return !pendingServerRequestResolvers.has(serverRequestId);
if (context) {
const { mcpServerElicitationRequests, mcpServerSamplingRequests } = context;
return !mcpServerElicitationRequests.has(serverRequestId) && !mcpServerSamplingRequests.has(serverRequestId);
}
return hasResponded;
};

View File

@@ -89,6 +89,5 @@ export interface OpenMcpHTTPClientConnectionOptions extends CommonMcpOptions {
}
export type OpenMcpClientConnectionOptions = OpenMcpHTTPClientConnectionOptions | OpenMcpStdioClientConnectionOptions;
export type McpReadyState = 'disconnected' | 'connecting' | 'connected';
export type McpEventDirection = 'INCOMING' | 'OUTGOING';

View File

@@ -5,6 +5,7 @@ import type { AnySchema } from '@modelcontextprotocol/sdk/server/zod-compat.js';
import type { RequestOptions } from '@modelcontextprotocol/sdk/shared/protocol.js';
import {
CancelledNotificationSchema,
CreateMessageRequestSchema,
ElicitRequestSchema,
EmptyResultSchema,
JSONRPCErrorSchema,
@@ -29,6 +30,7 @@ import {
listTools,
readResource,
responseElicitationRequest,
responseSamplingRequest,
sendRootListChangeNotification,
subscribeResource,
unsubscribeResource,
@@ -296,7 +298,7 @@ const openMcpClientConnection = async (options: OpenMcpClientConnectionOptions)
};
const performConnection = async (context: ConnectionContext) => {
const { abortController, options, requestId, mcpServerElicitationRequests } = context;
const { abortController, options, requestId, mcpServerElicitationRequests, mcpServerSamplingRequests } = context;
// Check if the connection has been aborted before proceeding
if (abortController.signal.aborted) {
clearConnectionContext(context);
@@ -318,6 +320,8 @@ const performConnection = async (context: ConnectionContext) => {
// declare the client to support elicitation
elicitation: {},
// declare the client to support sampling
sampling: {},
},
},
) as McpClient;
@@ -361,13 +365,25 @@ const performConnection = async (context: ConnectionContext) => {
});
});
// add sampling request handler to indicate the client supports it
mcpClient.setRequestHandler(CreateMessageRequestSchema, async (_request, extra) => {
return new Promise((resolve, reject) => {
const serverRequestId = extra.requestId;
mcpServerSamplingRequests.set(serverRequestId, { resolve, reject });
});
});
mcpClient.setNotificationHandler(CancelledNotificationSchema, notification => {
const serverRequestId = notification.params.requestId;
// handle server request cancellation
if (mcpServerElicitationRequests.has(serverRequestId)) {
console.log('Received server request cancellation notification', serverRequestId);
console.log('Received server request cancellation notification for elicitation request', serverRequestId);
mcpServerElicitationRequests.delete(serverRequestId);
}
if (mcpServerSamplingRequests.has(serverRequestId)) {
console.log('Received server request cancellation notification for sampling request', serverRequestId);
mcpServerSamplingRequests.delete(serverRequestId);
}
});
const originClientRequest = mcpClient.request.bind(mcpClient);
mcpClient.request = <T extends AnySchema>(request: Request, resultSchema: T, options?: RequestOptions) => {
@@ -468,6 +484,7 @@ export interface McpBridgeAPI {
};
client: {
responseElicitationRequest: typeof responseElicitationRequest;
responseSamplingRequest: typeof responseSamplingRequest;
hasRequestResponded: typeof hasRequestResponded;
cancelRequest: typeof cancelRequest;
};
@@ -523,6 +540,9 @@ export const registerMcpHandlers = () => {
ipcMainOn('mcp.client.responseElicitationRequest', (_, options: Parameters<typeof responseElicitationRequest>[0]) =>
responseElicitationRequest(options),
);
ipcMainOn('mcp.client.responseSamplingRequest', (_, options: Parameters<typeof responseSamplingRequest>[0]) =>
responseSamplingRequest(options),
);
ipcMainHandle('mcp.client.hasRequestResponded', (_, options: Parameters<typeof hasRequestResponded>[0]) =>
hasRequestResponded(options),
);

View File

@@ -24,6 +24,11 @@ export interface ModelConfig {
repeatPenalty?: number;
}
// One turn of an LLM conversation. Mirrors the role/content pairs of MCP
// sampling messages; only text content is represented here.
export interface MultiTurnMessage {
  role: 'user' | 'assistant';
  content: string;
}
export interface MockRouteData {
path: string;
method: string;
@@ -45,3 +50,9 @@ export type GenerateCommitsFromDiffFunction = (
files: string[];
}[]
>;
/**
 * Plugin entry point that generates an MCP sampling response from a
 * conversation. `modelConfig` carries only the per-request sampling hints
 * (maxTokens/temperature); the full resolved ModelConfig is returned
 * alongside the generated text content.
 */
export type GenerateMcpSamplingResponseFunction = (parameters: {
  systemPrompt?: string;
  messages: MultiTurnMessage[];
  modelConfig: Pick<ModelConfig, 'maxTokens' | 'temperature'>;
}) => Promise<{ content: string; modelConfig: ModelConfig }>;

View File

@@ -0,0 +1,92 @@
import { href } from 'react-router';
import type { MultiTurnMessage } from '~/plugins/types';
import { showToast } from '~/ui/components/toast-notification';
import { createFetcherSubmitHook } from '~/utils/router';
import type { Route } from './+types/ai.mcp-generate-sampling-response';
// JSON payload posted to this route's clientAction.
interface RequestData {
  // Conversation turns extracted from the MCP sampling request.
  messages: MultiTurnMessage[];
  maxTokens: number;
  // Id of the MCP connection the sampling request belongs to.
  requestId: string;
  // Id of the server-initiated sampling request being answered.
  serverRequestId: string;
  temperature?: number;
  systemPrompt?: string;
}
/**
 * Route action: generates an MCP sampling response using the configured LLM
 * and, on success, approves the pending server sampling request with the
 * generated text. Returns `{ response }` on success or `{ error }` otherwise.
 */
export async function clientAction(args: Route.ClientActionArgs) {
  const { messages, maxTokens, temperature, systemPrompt, requestId, serverRequestId } =
    (await args.request.json()) as RequestData;
  try {
    // Both the feature flag and an active LLM backend are required.
    const featureEnabled = await window.main.llm.getAIFeatureEnabled('aiMcpClient');
    const llmConfigured = (await window.main.llm.getCurrentConfig()) !== null;
    if (!featureEnabled || !llmConfigured) {
      return {
        error: 'Enable MCP LLM integration with AI in Insomnia Preferences → AI Settings to use this feature.',
      };
    }
    const { response, error } = await window.main.generateMcpSamplingResponse({
      systemPrompt,
      messages,
      modelConfig: {
        maxTokens,
        temperature,
      },
    });
    if (!response) {
      const description = `The AI service returned invalid data. Please try again. ${error}`;
      showToast({
        title: 'Failed to generate sampling response',
        icon: 'star',
        status: 'error',
        description,
      });
      return { error: description };
    }
    // Approve the pending server sampling request with the AI-generated text.
    window.main.mcp.client.responseSamplingRequest({
      requestId,
      serverRequestId,
      type: 'approve',
      result: {
        content: {
          type: 'text',
          text: response.content,
        },
        model: response.modelConfig.model,
        role: 'assistant',
      },
    });
    return { response };
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err);
    const description = `There was an error communicating with the AI service. Please try again. ${message}`;
    showToast({
      title: 'Failed to generate sampling response',
      icon: 'star',
      status: 'error',
      description,
    });
    return { error: description };
  }
}
// Fetcher hook that POSTs a sampling-generation request to this route's
// clientAction as a JSON body.
export const useAIGenerateActionFetcher = createFetcherSubmitHook(
  submit => (data: RequestData) => {
    submit(JSON.stringify(data), {
      action: href('/ai/mcp-generate-sampling-response'),
      method: 'POST',
      encType: 'application/json',
    });
  },
  clientAction,
);

View File

@@ -14,6 +14,7 @@ export const fallbackFeatures = Object.freeze<FeatureList>({
orgBasicRbac: { enabled: false, reason: 'Insomnia API unreachable' },
aiMockServers: { enabled: false, reason: 'Insomnia API unreachable' },
aiCommitMessages: { enabled: false, reason: 'Insomnia API unreachable' },
aiMcpClient: { enabled: false, reason: 'Insomnia API unreachable' },
});
// If network unreachable assume user has paid for the current period

View File

@@ -83,6 +83,7 @@ export interface FeatureList {
orgBasicRbac: FeatureStatus;
aiMockServers: FeatureStatus;
aiCommitMessages: FeatureStatus;
aiMcpClient: FeatureStatus;
}
export interface Billing {

View File

@@ -0,0 +1,83 @@
import type { RJSFSchema, UiSchema } from '@rjsf/utils';
import { useRef, useState } from 'react';
import { Button, Toolbar } from 'react-aria-components';
import { InsomniaRjsfForm, type InsomniaRjsfFormHandle } from '~/ui/components/rjsf';
interface ElicitationFormProps {
  // Id of the MCP connection the elicitation belongs to.
  requestId: string;
  // Id of the server-initiated elicitation request being answered.
  serverRequestId: string;
  // JSON schema describing the data the server is requesting.
  schema: RJSFSchema;
}
// Hide RJSF's built-in submit button; submission happens via the toolbar below.
const uiSchema: UiSchema = {
  'ui:submitButtonOptions': {
    norender: true,
  },
};
/**
 * Renders a JSON-schema-driven form for an MCP elicitation request plus a
 * toolbar to submit, decline, or cancel the server's request.
 */
export const ElicitationForm = ({ requestId, serverRequestId, schema }: ElicitationFormProps) => {
  const formRef = useRef<InsomniaRjsfFormHandle>(null);
  const [values, setValues] = useState({});

  // Validate, then send the collected values back as the elicitation answer.
  const handleSubmit = () => {
    if (!formRef.current?.validate()) {
      return;
    }
    window.main.mcp.client.responseElicitationRequest({
      requestId,
      serverRequestId,
      type: 'submit',
      content: values,
    });
  };

  const handleDecline = () => {
    window.main.mcp.client.responseElicitationRequest({
      requestId,
      serverRequestId,
      type: 'decline',
    });
  };

  const handleCancel = () => {
    window.main.mcp.client.responseElicitationRequest({
      requestId,
      serverRequestId,
      type: 'cancel',
    });
  };

  return (
    <div className="flex grow flex-col overflow-hidden">
      <div className="h-[calc(100%-var(--line-height-sm))] overflow-auto bg-inherit px-5 py-1">
        <InsomniaRjsfForm
          formData={values}
          onChange={setValues}
          schema={schema}
          uiSchema={uiSchema}
          ref={formRef}
          showErrorList={false}
          focusOnFirstError
        />
      </div>
      <Toolbar className="content-box sticky bottom-0 z-10 flex h-(--line-height-sm) shrink-0 gap-3 border-b border-(--hl-md) bg-(--color-bg) px-5 py-2 text-(--font-size-sm)">
        <Button
          onPress={handleSubmit}
          className="rounded-sm bg-(--color-surprise) px-(--padding-md) text-center text-(--color-font-surprise) hover:brightness-75"
        >
          Submit
        </Button>
        <Button
          onPress={handleDecline}
          className="rounded-md border border-solid border-(--hl-lg) bg-(--color-bg) px-(--padding-md) text-center"
        >
          Decline
        </Button>
        <Button
          onPress={handleCancel}
          className="rounded-md border border-solid border-(--hl-lg) bg-(--color-bg) px-(--padding-md) text-center"
        >
          Cancel
        </Button>
      </Toolbar>
    </div>
  );
};

View File

@@ -1,11 +1,13 @@
import { CallToolResultSchema, ElicitRequestSchema } from '@modelcontextprotocol/sdk/types.js';
import { type RJSFSchema, type UiSchema } from '@rjsf/utils';
import {
CallToolResultSchema,
CreateMessageRequestSchema,
ElicitRequestSchema,
} from '@modelcontextprotocol/sdk/types.js';
import { type RJSFSchema } from '@rjsf/utils';
import React, { useCallback, useEffect, useRef, useState } from 'react';
import { Button, Toolbar } from 'react-aria-components';
import { Button } from 'react-aria-components';
import { useParams } from 'react-router';
import { InsomniaRjsfForm, type InsomniaRjsfFormHandle } from '~/ui/components/rjsf';
import {
getPreviewModeName,
PREVIEW_MODE_FRIENDLY,
@@ -23,24 +25,18 @@ import {
import { CodeEditor, type CodeEditorHandle } from '../../components/.client/codemirror/code-editor';
import { useRequestMetaPatcher } from '../../hooks/use-request';
import { Dropdown, DropdownItem, DropdownSection, ItemContent } from '../base/dropdown';
import { ElicitationForm } from './elicitation-form';
import { SamplingForm } from './sampling-form';
interface Props {
event: McpEvent;
}
const uiSchema: UiSchema = {
'ui:submitButtonOptions': {
norender: true,
},
};
export const MessageEventView = ({ event }: Props) => {
const { activeRequestMeta, activeResponse } = useRequestLoaderData() as McpRequestLoaderData;
const filterHistory = activeRequestMeta.responseFilterHistory || [];
const filter = activeRequestMeta.responseFilter || '';
const [formData, setFormData] = useState({});
const [isServerRequestResponded, setIsServerRequestResponded] = useState(true);
const rjsfFormRef = useRef<InsomniaRjsfFormHandle>(null);
const editorRef = useRef<CodeEditorHandle>(null);
const { requestId } = useParams() as { requestId: string };
@@ -49,6 +45,8 @@ export const MessageEventView = ({ event }: Props) => {
const eventData = isErrorEvent ? event.error : 'data' in event ? event.data : '';
const raw = JSON.stringify(eventData);
const isElicitationRequest = ElicitRequestSchema.safeParse(eventData).success;
const samplingRequestParseResult = CreateMessageRequestSchema.safeParse(eventData);
const isSamplingRequest = samplingRequestParseResult.success;
const [viewMode, setViewMode] = useState<'raw' | 'form'>('raw');
const handleDownloadResponseBody = useCallback(async () => {
@@ -95,15 +93,11 @@ export const MessageEventView = ({ event }: Props) => {
if (ElicitRequestSchema.safeParse(eventData).success) {
const parsedElicitRequest = ElicitRequestSchema.parse(eventData);
const requestSchema = parsedElicitRequest.params.requestedSchema;
return requestSchema;
return requestSchema as RJSFSchema;
}
return {};
};
const handleRjsfFormChange = (formData: any) => {
setFormData(formData);
};
let pretty = raw;
try {
const parsed = JSON.parse(raw);
@@ -152,10 +146,10 @@ export const MessageEventView = ({ event }: Props) => {
setViewMode('form');
}
};
if (isElicitationRequest) {
if (isElicitationRequest || isSamplingRequest) {
checkRequestCompleted();
}
}, [requestId, eventData?.id, isElicitationRequest]);
}, [requestId, eventData?.id, isElicitationRequest, isSamplingRequest]);
return (
<div className="flex h-full flex-col">
@@ -194,18 +188,18 @@ export const MessageEventView = ({ event }: Props) => {
</DropdownItem>
</DropdownSection>
</Dropdown>
{isElicitationRequest && !isServerRequestResponded && (
{!isServerRequestResponded && (
<Button
className={`mx-2 mt-2 px-2 text-(--color-font) outline-hidden transition-colors duration-300 hover:bg-(--hl-sm) hover:text-(--color-font) focus:bg-(--hl-sm) ${
className={`px-2 text-(--color-font) outline-hidden transition-colors duration-300 hover:bg-(--hl-sm) hover:text-(--color-font) focus:bg-(--hl-sm) ${
viewMode === 'form' ? 'bg-(--hl-xs) text-(--color-font)' : ''
}`}
onPress={() => setViewMode('form')}
>
Elicitation Form
{isElicitationRequest ? 'Elicitation Form' : 'Sampling Form'}
</Button>
)}
</div>
{viewMode === 'raw' ? (
{viewMode === 'raw' && (
<div className="h-full grow p-4">
<CodeEditor
id="mcp-data-preview"
@@ -221,61 +215,16 @@ export const MessageEventView = ({ event }: Props) => {
autoPrettify
/>
</div>
) : (
<div className="flex grow flex-col overflow-hidden">
<div className="h-[calc(100%-var(--line-height-sm))] overflow-auto bg-inherit px-5 py-1">
<InsomniaRjsfForm
formData={formData}
onChange={handleRjsfFormChange}
schema={getElicitationFormSchema() as RJSFSchema}
uiSchema={uiSchema}
ref={rjsfFormRef}
showErrorList={false}
focusOnFirstError
/>
</div>
<Toolbar className="content-box sticky bottom-0 z-10 flex h-(--line-height-sm) shrink-0 gap-3 border-b border-(--hl-md) bg-(--color-bg) px-5 py-2 text-(--font-size-sm)">
<Button
onPress={() => {
if (rjsfFormRef.current?.validate()) {
window.main.mcp.client.responseElicitationRequest({
requestId,
serverRequestId: eventData?.id,
type: 'submit',
content: formData,
});
}
}}
className="rounded-sm bg-(--color-surprise) px-(--padding-md) text-center text-(--color-font-surprise) hover:brightness-75"
>
Submit
</Button>
<Button
onPress={() =>
window.main.mcp.client.responseElicitationRequest({
requestId,
serverRequestId: eventData?.id,
type: 'decline',
})
}
className="rounded-md border border-solid border-(--hl-lg) bg-(--color-bg) px-(--padding-md) text-center"
>
Decline
</Button>
<Button
onPress={() =>
window.main.mcp.client.responseElicitationRequest({
requestId,
serverRequestId: eventData?.id,
type: 'cancel',
})
}
className="rounded-md border border-solid border-(--hl-lg) bg-(--color-bg) px-(--padding-md) text-center"
>
Cancel
</Button>
</Toolbar>
</div>
)}
{viewMode === 'form' && isElicitationRequest && (
<ElicitationForm schema={getElicitationFormSchema()} requestId={requestId} serverRequestId={eventData?.id} />
)}
{viewMode === 'form' && isSamplingRequest && (
<SamplingForm
requestId={requestId}
serverRequestId={eventData?.id}
samplingData={samplingRequestParseResult.data}
/>
)}
</div>
);

View File

@@ -197,7 +197,7 @@ export const McpUrlActionBar = ({
let answered = false;
showModal(AskModal, {
title: 'MCP Authentication Confirmation',
message: 'The MCP server is requesting authentication to proceed. Do you wish to continue?',
message: 'The MCP server is requesting OAuth Authorization Flow to proceed. Do you wish to continue?',
onDone: async (yes: boolean) => {
if (answered) {
console.error('Already answered MCP auth confirmation, this should not happen.');

View File

@@ -0,0 +1,150 @@
import type { CreateMessageRequest, CreateMessageResult } from '@modelcontextprotocol/sdk/types.js';
import type { RJSFSchema, UiSchema } from '@rjsf/utils/lib/types.js';
import { useRef, useState } from 'react';
import { Button, Toolbar } from 'react-aria-components';
import type { MultiTurnMessage } from '~/plugins/types';
import { useAIGenerateActionFetcher } from '~/routes/ai.mcp-generate-sampling-response';
import { Icon } from '~/ui/components/icon';
import { InsomniaRjsfForm, type InsomniaRjsfFormHandle } from '~/ui/components/rjsf';
import { useAIFeatureStatus } from '~/ui/hooks/use-organization-features';
interface SamplingFormProps {
requestId: string;
serverRequestId: string;
samplingData: CreateMessageRequest;
}
const uiSchema: UiSchema = {
'ui:submitButtonOptions': {
norender: true,
},
'content': {
'ui:widget': 'textarea',
},
};
// JSON schema for the sampling response form. Mirrors the MCP
// `CreateMessageResult` shape, with `content` flattened to a plain text
// string (only text content is produced by this form).
const samplingFormSchema: RJSFSchema = {
  type: 'object',
  properties: {
    content: {
      type: 'string',
      description: 'The content of the message generated by the LLM.',
    },
    role: {
      type: 'string',
      enum: ['user', 'assistant'],
    },
    model: {
      // Name of the model that produced the completion.
      type: 'string',
    },
    stopReason: {
      // Optional: why generation stopped. Not in `required` below.
      type: 'string',
      enum: ['endTurn', 'stopSequence', 'maxTokens'],
    },
  },
  required: ['content', 'model', 'role'],
};
// Convert MCP sampling messages into the plugin's multi-turn chat format.
// Only plain-text content blocks are forwarded; other content types
// (image, audio, resource) are silently skipped for now.
const toMultiTurnMessages = (messages: CreateMessageRequest['params']['messages']): MultiTurnMessage[] => {
  const result: MultiTurnMessage[] = [];
  for (const { role, content } of messages) {
    if ('type' in content && content.type === 'text') {
      result.push({ role, content: content.text });
    }
  }
  return result;
};

/**
 * Approval UI for a server-initiated MCP `sampling/createMessage` request.
 *
 * The user can:
 * - "Answer by AI": submit the server-provided prompt to the configured LLM
 *   to draft a response (shown only when the org/user AI+MCP feature is on);
 * - "Approve": validate the form and send its contents back to the server;
 * - "Reject": decline the sampling request.
 */
export const SamplingForm = ({ requestId, serverRequestId, samplingData }: SamplingFormProps) => {
  const rjsfFormRef = useRef<InsomniaRjsfFormHandle>(null);
  const [formData, setFormData] = useState<Record<string, unknown>>({});
  const { isMCPWithAIEnabled } = useAIFeatureStatus();

  const handleRjsfFormChange = (formData: Record<string, unknown>) => {
    setFormData(formData);
  };

  const generateSamplingResponseFetcher = useAIGenerateActionFetcher();
  const isGenerating = generateSamplingResponseFetcher.state !== 'idle';

  // Forward the server's prompt to the LLM backend to draft a response.
  const handleGenerateByAI = () => {
    const { messages, maxTokens, temperature, systemPrompt } = samplingData.params;
    generateSamplingResponseFetcher.submit({
      messages: toMultiTurnMessages(messages),
      systemPrompt,
      maxTokens,
      temperature,
      requestId,
      serverRequestId,
    });
  };

  // Validate the form, then send the approved sampling result to the server.
  const handleApprove = () => {
    if (!rjsfFormRef.current?.validate()) {
      return;
    }
    const samplingFormData = formData as Pick<CreateMessageResult, 'model' | 'stopReason' | 'role'> & {
      content: string;
    };
    window.main.mcp.client.responseSamplingRequest({
      requestId,
      serverRequestId,
      type: 'approve',
      result: {
        content: {
          type: 'text',
          text: samplingFormData.content,
        },
        model: samplingFormData.model,
        // NOTE(review): an omitted stopReason is sent as '' — confirm servers
        // tolerate an empty string here; the field is optional in
        // CreateMessageResult and could be left undefined instead.
        stopReason: samplingFormData.stopReason || '',
        role: samplingFormData.role,
      },
    });
  };

  const handleReject = () => {
    window.main.mcp.client.responseSamplingRequest({
      requestId,
      serverRequestId,
      type: 'reject',
      reason: 'User rejected the sampling request',
    });
  };

  return (
    <div className="flex grow flex-col overflow-hidden">
      <div className="h-[calc(100%-var(--line-height-sm))] overflow-auto bg-inherit px-5 py-1">
        <InsomniaRjsfForm
          formData={formData}
          onChange={handleRjsfFormChange}
          schema={samplingFormSchema}
          uiSchema={uiSchema}
          ref={rjsfFormRef}
          showErrorList={false}
          focusOnFirstError
        />
      </div>
      <Toolbar className="content-box sticky bottom-0 z-10 flex h-(--line-height-sm) shrink-0 gap-3 border-b border-(--hl-md) bg-(--color-bg) px-5 py-2 text-(--font-size-sm)">
        {isMCPWithAIEnabled && (
          <Button
            onPress={handleGenerateByAI}
            // Guard against duplicate LLM submissions while one is in flight.
            isDisabled={isGenerating}
            className="rounded-sm bg-(--color-surprise) px-(--padding-md) text-center text-(--color-font-surprise) hover:brightness-75"
          >
            <Icon
              icon={isGenerating ? 'spinner' : 'star'}
              className={`mr-1 size-4 ${isGenerating ? 'animate-spin' : ''}`}
            />
            Answer by AI
          </Button>
        )}
        <Button
          onPress={handleApprove}
          className="rounded-sm bg-(--color-surprise) px-(--padding-md) text-center text-(--color-font-surprise) hover:brightness-75"
        >
          Approve
        </Button>
        <Button
          onPress={handleReject}
          className="rounded-md border border-solid border-(--hl-lg) bg-(--color-bg) px-(--padding-md) text-center"
        >
          Reject
        </Button>
      </Toolbar>
    </div>
  );
};

View File

@@ -98,6 +98,21 @@ const CustomDatePickerWidget = (props: WidgetProps) => {
<DatePicker id={id} isInvalid={isInvalid} value={value ? parseDate(value) : null} onChange={handleDateChange} />
);
};
// Textarea widget for string values
// Multi-line text input widget for RJSF string fields; selected when a
// field's uiSchema requests 'ui:widget': 'textarea'.
const CustomTextAreaWidget = ({ value, onChange, disabled, readonly, id }: WidgetProps) => (
  <textarea
    id={id}
    className={baseInputClasses}
    disabled={disabled}
    readOnly={readonly}
    value={value || ''}
    onChange={e => onChange(e.target.value)}
  />
);
// ===== TEMPLATES =====
@@ -355,6 +370,7 @@ const themeWidgets: RegistryWidgetsType = {
CheckboxWidget: CustomCheckboxWidget,
SelectWidget: CustomSelectWidget,
DateWidget: CustomDatePickerWidget,
TextareaWidget: CustomTextAreaWidget,
};
const themeTemplates = {

View File

@@ -16,11 +16,13 @@ export const AISettings = () => {
const [configuredLLMs, setConfiguredLLMs] = useState<LLMConfig[]>([]);
const [mockServerEnabled, setMockServerEnabled] = useState(false);
const [commitMessagesEnabled, setCommitMessagesEnabled] = useState(false);
const [mcpClientEnabled, setMcpClientEnabled] = useState(false);
const hasActiveLLM = currentLLM !== null;
// If the feature is undefined, default to disabled (org hasn't enabled it)
const isMockServerEnabledByOrg = features.aiMockServers ? features.aiMockServers.enabled : false;
const isCommitMessagesEnabledByOrg = features.aiCommitMessages ? features.aiCommitMessages.enabled : false;
const isMcpClientEnabledByOrg = features.aiMcpClient ? features.aiMcpClient.enabled : false;
useEffect(() => {
const loadConfigurations = async () => {
@@ -28,9 +30,11 @@ export const AISettings = () => {
const current = await window.main.llm.getActiveBackend();
const mockServerFeature = await window.main.llm.getAIFeatureEnabled('aiMockServers');
const commitMessagesFeature = await window.main.llm.getAIFeatureEnabled('aiCommitMessages');
const mcpClientFeature = await window.main.llm.getAIFeatureEnabled('aiMcpClient');
setMockServerEnabled(isMockServerEnabledByOrg && mockServerFeature);
setCommitMessagesEnabled(isCommitMessagesEnabledByOrg && commitMessagesFeature);
setMcpClientEnabled(isMcpClientEnabledByOrg && mcpClientFeature);
setConfiguredLLMs(configs);
if (current) {
@@ -40,7 +44,7 @@ export const AISettings = () => {
};
loadConfigurations();
}, [isMockServerEnabledByOrg, isCommitMessagesEnabledByOrg]);
}, [isMockServerEnabledByOrg, isCommitMessagesEnabledByOrg, isMcpClientEnabledByOrg]);
const saveLLMSettings = useCallback(
async (setCurrent: boolean, backend: LLMBackend, extras: Partial<LLMConfig> = {}) => {
@@ -77,6 +81,11 @@ export const AISettings = () => {
await window.main.llm.setAIFeatureEnabled('aiCommitMessages', enabled);
}, []);
const handleMcpClientToggle = useCallback(async (enabled: boolean) => {
setMcpClientEnabled(enabled);
await window.main.llm.setAIFeatureEnabled('aiMcpClient', enabled);
}, []);
const activeBadge = (
<span className="bg-surprise flex h-5 min-w-5 items-center justify-center rounded-full px-2 py-1 text-xs text-white">
Active
@@ -155,6 +164,30 @@ export const AISettings = () => {
</div>
</Switch>
</div>
<div className="flex items-center justify-between">
<div className="flex flex-col gap-1">
<span className="text-sm font-medium text-(--color-font)">Allow MCP client to use the LLM service</span>
{!isMcpClientEnabledByOrg ? (
<p className="text-xs text-(--color-danger)">
Disabled by organization
{features.aiMcpClient?.reason ? `: ${features.aiMcpClient.reason}` : ''}
</p>
) : !hasActiveLLM ? (
<p className="text-xs text-(--hl)">Configure and activate an LLM to enable this feature</p>
) : null}
</div>
<Switch
isSelected={mcpClientEnabled && isMcpClientEnabledByOrg}
onChange={handleMcpClientToggle}
isDisabled={!hasActiveLLM || !isMcpClientEnabledByOrg}
className="group flex items-center gap-2"
>
<div className="flex h-6 w-11 cursor-pointer items-center rounded-full border-2 border-solid border-transparent bg-(--hl-md) transition-colors group-data-disabled:cursor-not-allowed group-data-disabled:opacity-50 group-data-selected:bg-(--color-surprise)">
<span className="h-5 w-5 translate-x-0 rounded-full bg-white transition-transform group-data-selected:translate-x-5" />
</div>
</Switch>
</div>
</div>
</div>

View File

@@ -48,21 +48,26 @@ export function useOrganizationPermissions() {
interface AIFeatureStatus {
isGenerateMockServersWithAIEnabled: boolean;
isGenerateCommitMessagesWithAIEnabled: boolean;
isMCPWithAIEnabled: boolean;
}
export function useAIFeatureStatus(): AIFeatureStatus {
const { features } = useOrganizationPermissions();
const [generateMockServersWithAIEnabledByUser, setGenerateMockServersWithAIEnabledByUser] = useState(false);
const [generateCommitMessagesWithAIEnabledByUser, setGenerateCommitMessagesWithAIEnabledByUser] = useState(false);
const [mcpIntegrationWithAIEnabledByUser, setMCPIntegrationWithAIEnabledByUser] = useState(false);
const [hasActiveLLM, setHasActiveLLM] = useState(false);
const loadFeatureStatus = useCallback(async () => {
const userEnabledGenerateMockServersWithAI = await window.main.llm.getAIFeatureEnabled('aiMockServers');
const userEnabledGenerateCommitMessagesWithAI = await window.main.llm.getAIFeatureEnabled('aiCommitMessages');
const userEnabledMcpClientWithAI = await window.main.llm.getAIFeatureEnabled('aiMcpClient');
const currentLLM = await window.main.llm.getCurrentConfig();
setGenerateMockServersWithAIEnabledByUser(userEnabledGenerateMockServersWithAI);
setGenerateCommitMessagesWithAIEnabledByUser(userEnabledGenerateCommitMessagesWithAI);
setMCPIntegrationWithAIEnabledByUser(userEnabledMcpClientWithAI);
setHasActiveLLM(currentLLM !== null);
}, []);
@@ -72,11 +77,13 @@ export function useAIFeatureStatus(): AIFeatureStatus {
const generateMockServersWithAIAllowedByOrg = features.aiMockServers ? features.aiMockServers.enabled : true;
const generateCommitMessagesWithAIAllowedByOrg = features.aiCommitMessages ? features.aiCommitMessages.enabled : true;
const mcpClientWithAIAllowedByOrg = features.aiMcpClient ? features.aiMcpClient.enabled : true;
return {
isGenerateMockServersWithAIEnabled:
generateMockServersWithAIAllowedByOrg && generateMockServersWithAIEnabledByUser && hasActiveLLM,
isGenerateCommitMessagesWithAIEnabled:
generateCommitMessagesWithAIAllowedByOrg && generateCommitMessagesWithAIEnabledByUser && hasActiveLLM,
isMCPWithAIEnabled: mcpClientWithAIAllowedByOrg && mcpIntegrationWithAIEnabledByUser && hasActiveLLM,
};
}