WARNING: THIS SITE IS A MIRROR OF GITHUB.COM / IT CANNOT LOG IN OR REGISTER ACCOUNTS / THE CONTENTS ARE PROVIDED AS-IS / THIS SITE ASSUMES NO RESPONSIBILITY FOR ANY DISPLAYED CONTENT OR LINKS / IF YOU FIND SOMETHING THAT MAY NOT BE GOOD FOR EVERYONE, CONTACT ADMIN AT ilovescratch@foxmail.com
Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/slimy-apes-destroy.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
'@ai-sdk/openai': patch
---

feat(openai): add OpenAI's new shell tool
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
import { openai } from '@ai-sdk/openai';
import {
generateText,
ModelMessage,
stepCountIs,
ToolApprovalResponse,
} from 'ai';
import * as readline from 'node:readline/promises';
import { executeShellCommand } from '../lib/shell-executor';
import { run } from '../lib/run';

// Interactive terminal prompt used to ask the user to approve shell commands.
const terminal = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
});

run(async () => {
  // Conversation history replayed to the model on every loop iteration.
  const messages: ModelMessage[] = [
    {
      role: 'user',
      content: 'List the files in my current directory',
    },
  ];
  // Approval responses collected during the current turn; flushed into
  // `messages` at the start of the next iteration.
  let approvals: ToolApprovalResponse[] = [];

  while (true) {
    if (approvals.length > 0) {
      messages.push({ role: 'tool', content: approvals });
      approvals = [];
    }

    const result = await generateText({
      model: openai.responses('gpt-5.1'),
      tools: {
        shell: openai.tools.shell({
          // Require explicit user confirmation before any command runs.
          needsApproval: true,
          execute: async ({ action }) => {
            // Run all requested commands in parallel, sharing the timeout.
            const outputs = await Promise.all(
              action.commands.map(command =>
                executeShellCommand(command, action.timeoutMs),
              ),
            );

            return { output: outputs };
          },
        }),
      },
      messages,
      stopWhen: stepCountIs(5),
      system:
        'You have access to a shell tool that can execute commands on the local filesystem. ' +
        'Use the shell tool when you need to perform file operations or run commands. ' +
        'When a tool execution is not approved by the user, do not retry it. ' +
        'Just say that the tool execution was not approved.',
    });

    process.stdout.write('\nAssistant: ');
    for (const part of result.content) {
      if (part.type === 'text') {
        process.stdout.write(part.text);
      }

      if (part.type === 'tool-approval-request') {
        // The tool-call input may arrive as a JSON string or already parsed.
        const input =
          typeof part.toolCall.input === 'string'
            ? JSON.parse(part.toolCall.input)
            : part.toolCall.input;
        const commands =
          (input as { action?: { commands?: string[] } }).action?.commands ||
          [];

        console.log('\nShell command approval required:');
        commands.forEach((cmd, index) => {
          console.log(` ${index + 1}. ${cmd}`);
        });

        const answer = await terminal.question(
          '\nProceed with execution? [y/N] ',
        );

        approvals.push({
          type: 'tool-approval-response',
          approvalId: part.approvalId,
          // Anything other than "y"/"yes" counts as a denial.
          approved:
            answer.toLowerCase() === 'y' || answer.toLowerCase() === 'yes',
        });
      }
    }

    process.stdout.write('\n\n');

    // Persist the assistant's turn so the next request has full context.
    messages.push(...result.response.messages);

    // Stop once there is nothing left to approve and the model is not
    // waiting on further tool calls.
    if (approvals.length === 0 && result.finishReason !== 'tool-calls') {
      break;
    }
  }

  terminal.close();
});
28 changes: 28 additions & 0 deletions examples/ai-core/src/generate-text/openai-responses-shell-tool.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import { openai } from '@ai-sdk/openai';
import { generateText, stepCountIs } from 'ai';
import { executeShellCommand } from '../lib/shell-executor';
import { run } from '../lib/run';

// Example: let the model drive the local shell via OpenAI's shell tool.
run(async () => {
  // Tool definition: executes every requested command concurrently and
  // hands the collected outputs back to the model.
  const shellTool = openai.tools.shell({
    async execute({ action }) {
      const commandResults = await Promise.all(
        action.commands.map(cmd => executeShellCommand(cmd, action.timeoutMs)),
      );

      return { output: commandResults };
    },
  });

  const { text } = await generateText({
    model: openai.responses('gpt-5.1'),
    tools: { shell: shellTool },
    prompt:
      'Create a file in my ~/Desktop directory called dec1.txt with the text: THIS WORKS!',
    stopWhen: stepCountIs(5),
  });

  console.log('Result:', text);
});
39 changes: 39 additions & 0 deletions examples/ai-core/src/lib/shell-executor.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
import { exec } from 'node:child_process';
import { promisify } from 'node:util';

const execAsync = promisify(exec);

/**
 * Executes a shell command via `child_process.exec` and normalizes the result.
 *
 * Never throws: non-zero exits, timeouts, and spawn failures are all folded
 * into the returned `outcome` so callers can treat every command uniformly.
 *
 * @param command - Shell command line (interpreted by the platform shell).
 * @param timeoutMs - Optional timeout in milliseconds; defaults to 60 seconds.
 * @returns Captured stdout/stderr plus a discriminated `outcome`:
 *          `{ type: 'timeout' }` when the process was killed for exceeding
 *          the timeout, otherwise `{ type: 'exit', exitCode }`.
 */
export async function executeShellCommand(
  command: string,
  timeoutMs?: number,
): Promise<{
  stdout: string;
  stderr: string;
  outcome: { type: 'timeout' } | { type: 'exit'; exitCode: number };
}> {
  const timeout = timeoutMs ?? 60_000; // Default 60 seconds

  try {
    const { stdout, stderr } = await execAsync(command, {
      timeout,
      maxBuffer: 10 * 1024 * 1024, // 10 MiB per stream
    });

    return {
      stdout: stdout || '',
      stderr: stderr || '',
      outcome: { type: 'exit', exitCode: 0 },
    };
  } catch (error: unknown) {
    // exec() rejects with an Error augmented with child-process metadata.
    const err = error as Error & {
      killed?: boolean;
      signal?: string | null;
      code?: number | string | null;
      stdout?: string;
      stderr?: string;
    };

    // A maxBuffer overflow also kills the child (`killed: true`), so exclude
    // it explicitly before classifying a killed process as a timeout.
    const bufferExceeded = err.code === 'ERR_CHILD_PROCESS_STDIO_MAXBUFFER';
    const timedOut =
      !bufferExceeded && (err.killed === true || err.signal === 'SIGTERM');

    // `code` is the numeric exit code for command failures, but can be a
    // string (e.g. 'ENOENT') for spawn errors — coerce those to 1 so the
    // declared `exitCode: number` type holds.
    const exitCode = typeof err.code === 'number' ? err.code : 1;

    return {
      stdout: err.stdout ?? '',
      stderr: err.stderr ?? String(error),
      outcome: timedOut ? { type: 'timeout' } : { type: 'exit', exitCode },
    };
  }
}
52 changes: 52 additions & 0 deletions examples/ai-core/src/stream-text/openai-responses-shell-tool.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import { openai } from '@ai-sdk/openai';
import { stepCountIs, streamText } from 'ai';
import { executeShellCommand } from '../lib/shell-executor';
import { run } from '../lib/run';

// Example: stream a shell-tool conversation and pretty-print each event.
run(async () => {
  // Highlights tool activity in bold green so it stands out from model text.
  const green = (label: string, value: unknown) =>
    `\x1b[32m\x1b[1m${label}\x1b[22m ${JSON.stringify(value, null, 2)}\x1b[0m`;

  const result = streamText({
    model: openai.responses('gpt-5.1'),
    tools: {
      shell: openai.tools.shell({
        async execute({ action }) {
          // Execute every requested command concurrently.
          const commandResults = await Promise.all(
            action.commands.map(cmd =>
              executeShellCommand(cmd, action.timeoutMs),
            ),
          );

          return { output: commandResults };
        },
      }),
    },
    prompt: 'List the files in my ~/Desktop directory',
    stopWhen: stepCountIs(5),
  });

  for await (const part of result.fullStream) {
    if (part.type === 'text-delta') {
      process.stdout.write(part.text);
    } else if (part.type === 'tool-call') {
      console.log(green('Tool call:', part));
    } else if (part.type === 'tool-result') {
      console.log(green('Tool result:', part));
    } else if (part.type === 'error') {
      console.error('Error:', part.error);
    }
  }
});
87 changes: 87 additions & 0 deletions examples/next-openai/agent/openai-shell-agent.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import { openai } from '@ai-sdk/openai';
import { Sandbox } from '@vercel/sandbox';
import { ToolLoopAgent, InferAgentUIMessage } from 'ai';

// warning: this is a demo sandbox that is shared across chats on localhost
let globalSandboxId: string | null = null;

/**
 * Returns the shared demo sandbox, creating it on first use.
 * Later calls reconnect to the same sandbox by its stored id.
 */
async function getSandbox(): Promise<Sandbox> {
  if (globalSandboxId !== null) {
    return Sandbox.get({ sandboxId: globalSandboxId });
  }

  const created = await Sandbox.create();
  globalSandboxId = created.sandboxId;
  return created;
}

/**
 * Runs a shell command inside the shared Vercel sandbox.
 *
 * Never throws: non-zero exits, timeouts, and transport errors are all
 * normalized into the returned `outcome` object.
 *
 * @param command - Shell command line, executed via `sh -c`.
 * @param timeoutMs - Optional timeout in milliseconds; defaults to 60 seconds.
 * @returns Captured stdout/stderr plus a discriminated `outcome`.
 */
async function executeShellCommand(
  command: string,
  timeoutMs?: number,
): Promise<{
  stdout: string;
  stderr: string;
  outcome: { type: 'timeout' } | { type: 'exit'; exitCode: number };
}> {
  const sandbox = await getSandbox();
  const timeout = timeoutMs ?? 60_000; // Default 60 seconds

  // Race the sandbox command against a timer. Keep the timer handle so it is
  // always cleared — otherwise the pending setTimeout keeps the event loop
  // alive for the full timeout even after the command finishes quickly.
  let timer: ReturnType<typeof setTimeout> | undefined;

  try {
    const timeoutPromise = new Promise<never>((_, reject) => {
      timer = setTimeout(() => reject(new Error('Command timeout')), timeout);
    });

    const commandPromise = sandbox.runCommand({
      cmd: 'sh',
      args: ['-c', command],
    });

    const commandResult = await Promise.race([commandPromise, timeoutPromise]);

    const stdout = await commandResult.stdout();
    const stderr = await commandResult.stderr();
    const exitCode = commandResult.exitCode ?? 0;

    return {
      stdout: stdout || '',
      stderr: stderr || '',
      outcome: { type: 'exit', exitCode },
    };
  } catch (error: unknown) {
    // NOTE: a timeout only stops our waiting; Promise.race cannot cancel the
    // sandbox command, which may keep running on the remote side.
    const err = error as Error & {
      code?: unknown;
      stdout?: string;
      stderr?: string;
    };
    const timedOut = err?.message?.includes('timeout') || false;
    // `code` may be a string for transport errors — only accept numbers so
    // the declared `exitCode: number` type holds.
    const exitCode = !timedOut && typeof err.code === 'number' ? err.code : 1;

    return {
      stdout: err?.stdout ?? '',
      stderr: err?.stderr ?? String(error),
      outcome: timedOut ? { type: 'timeout' } : { type: 'exit', exitCode },
    };
  } finally {
    // Safe even when the timer was never armed.
    clearTimeout(timer);
  }
}

/**
 * Agent wired to OpenAI's shell tool. Every command requires explicit user
 * approval before it is executed in the sandbox.
 */
export const openaiShellAgent = new ToolLoopAgent({
  model: openai.responses('gpt-5.1'),
  instructions:
    'You have access to a shell tool that can execute commands on the local filesystem. ' +
    'Use the shell tool when you need to perform file operations or run commands. ' +
    'When a tool execution is not approved by the user, do not retry it. ' +
    'Just say that the tool execution was not approved.',
  tools: {
    shell: openai.tools.shell({
      // Ask the user before running anything.
      needsApproval: true,
      execute: async ({ action }) => {
        // Execute all requested commands concurrently.
        const commandResults = await Promise.all(
          action.commands.map(cmd =>
            executeShellCommand(cmd, action.timeoutMs),
          ),
        );

        return { output: commandResults };
      },
    }),
  },
});

// UI-message type inferred from the agent, for use by the chat client.
export type OpenAIShellMessage = InferAgentUIMessage<typeof openaiShellAgent>;
11 changes: 11 additions & 0 deletions examples/next-openai/app/api/chat-openai-shell/route.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
import { openaiShellAgent } from '@/agent/openai-shell-agent';
import { createAgentUIStreamResponse } from 'ai';

/** Streams the shell agent's responses back to the chat UI. */
export async function POST(req: Request) {
  const body = await req.json();

  return createAgentUIStreamResponse({
    agent: openaiShellAgent,
    messages: body.messages,
  });
}
65 changes: 65 additions & 0 deletions examples/next-openai/app/chat-openai-shell/page.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
'use client';

import { useChat } from '@ai-sdk/react';
import {
DefaultChatTransport,
lastAssistantMessageIsCompleteWithApprovalResponses,
} from 'ai';
import ChatInput from '@/components/chat-input';
import { OpenAIShellMessage } from '@/agent/openai-shell-agent';
import ShellView from '@/components/tool/openai-shell-view';

/**
 * Chat page demonstrating OpenAI's shell tool with human-in-the-loop
 * approval. Shell tool parts render through <ShellView>, which surfaces
 * the approve/deny controls.
 */
export default function ChatOpenAIShell() {
  const { status, sendMessage, messages, addToolApprovalResponse } =
    useChat<OpenAIShellMessage>({
      transport: new DefaultChatTransport({
        api: '/api/chat-openai-shell',
      }),
      // Resend automatically once every pending approval has been answered.
      sendAutomaticallyWhen:
        lastAssistantMessageIsCompleteWithApprovalResponses,
    });

  return (
    <div className="flex flex-col py-24 mx-auto w-full max-w-4xl stretch">
      <h1 className="mb-2 text-xl font-bold text-black">OpenAI Shell Tool</h1>
      <h2 className="pb-2 mb-4 border-b text-black">
        Note: This example requires a Vercel OIDC Token to run commands with
        Vercel Sandbox
      </h2>

      {messages.map(message => {
        const roleLabel = message.role === 'user' ? 'User:' : 'Assistant:';

        return (
          <div key={message.id} className="whitespace-pre-wrap mb-4">
            <div className="mb-2">
              <div className="text-sm font-semibold text-black mb-1">
                {roleLabel}
              </div>
              <div className="space-y-4">
                {message.parts.map((messagePart, partIndex) => {
                  if (messagePart.type === 'text') {
                    return (
                      <div key={partIndex} className="text-black">
                        {messagePart.text}
                      </div>
                    );
                  }
                  if (messagePart.type === 'tool-shell') {
                    return (
                      <ShellView
                        key={partIndex}
                        invocation={messagePart}
                        addToolApprovalResponse={addToolApprovalResponse}
                      />
                    );
                  }
                  return null;
                })}
              </div>
            </div>
          </div>
        );
      })}

      <ChatInput status={status} onSubmit={text => sendMessage({ text })} />
    </div>
  );
}
Loading
Loading