The core AI library for TanStack AI.
npm install @tanstack/ai
chat() creates a streaming chat response.
import { chat, maxIterations } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";

const stream = chat({
  adapter: openaiText("gpt-5.2"),
  messages: [{ role: "user", content: "Hello!" }],
  tools: [myTool], // a tool created with toolDefinition() (see below)
  systemPrompts: ["You are a helpful assistant"],
  agentLoopStrategy: maxIterations(20),
});
Returns an async iterable of StreamChunk.
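To consume the stream, iterate it with for await. A minimal sketch; it assumes ContentStreamChunk carries delta/content fields mirroring the ThinkingStreamChunk shown under the types below:

let text = "";
for await (const chunk of stream) {
  // Assumption: ContentStreamChunk has a `delta` field like ThinkingStreamChunk
  if (chunk.type === "content") {
    text += chunk.delta ?? "";
  }
}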
summarize() creates a text summary.
import { summarize } from "@tanstack/ai";
import { openaiSummarize } from "@tanstack/ai-openai";

const result = await summarize({
  adapter: openaiSummarize("gpt-5.2"),
  text: "Long text to summarize...",
  maxLength: 100,
  style: "concise",
});
Returns a SummarizationResult with the summary text.
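A usage sketch; the exact shape of SummarizationResult isn't shown above, so the field name here is an assumption:

console.log(result.summary); // assumption: the summary text lives on `summary`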
toolDefinition() creates an isomorphic tool definition that can be instantiated for server or client execution.
import { toolDefinition } from "@tanstack/ai";
import { z } from "zod";

const myToolDef = toolDefinition({
  name: "my_tool",
  description: "Tool description",
  inputSchema: z.object({
    param: z.string(),
  }),
  outputSchema: z.object({
    result: z.string(),
  }),
  needsApproval: false, // Optional
});
// Create a client implementation
const myClientTool = myToolDef.client(async ({ param }) => {
  // Client-side implementation
  return { result: "..." };
});
// Or pass the bare definition directly to chat() (server-side, no execute)
chat({
  adapter: openaiText("gpt-5.2"),
  tools: [myToolDef],
  messages: [{ role: "user", content: "..." }],
});
// Or create a server implementation
const myServerTool = myToolDef.server(async ({ param }) => {
  // Server-side implementation
  return { result: "..." };
});

// Use the server implementation in chat()
chat({
  adapter: openaiText("gpt-5.2"),
  tools: [myServerTool],
  messages: [{ role: "user", content: "..." }],
});
Returns a ToolDefinition object with .server() and .client() methods for creating concrete implementations.
toServerSentEventsStream() converts a chat stream to a ReadableStream in Server-Sent Events format.
import { chat, toServerSentEventsStream } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";

const stream = chat({
  adapter: openaiText("gpt-5.2"),
  messages: [...],
});

const readableStream = toServerSentEventsStream(stream);
Returns a ReadableStream<Uint8Array> in Server-Sent Events format. Each chunk is a UTF-8-encoded SSE event: a "data: <JSON-serialized chunk>" line followed by a blank line.
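A sketch of serving this stream by hand from a fetch-style handler; toServerSentEventsResponse() below packages the same headers for you:

return new Response(readableStream, {
  headers: {
    "Content-Type": "text/event-stream",
    "Cache-Control": "no-cache",
    Connection: "keep-alive",
  },
});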
toServerSentEventsResponse() converts a stream to an HTTP Response with proper SSE headers.
import { chat, toServerSentEventsResponse } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";

const stream = chat({
  adapter: openaiText("gpt-5.2"),
  messages: [...],
});

// Inside an HTTP handler:
return toServerSentEventsResponse(stream);
Returns a Response object suitable for HTTP endpoints, with SSE headers (Content-Type: text/event-stream, Cache-Control: no-cache, Connection: keep-alive).
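For example, a complete fetch-style route handler (a sketch; the endpoint and request body shape are hypothetical):

export async function POST(request: Request): Promise<Response> {
  const { messages } = await request.json(); // hypothetical request body shape
  const stream = chat({
    adapter: openaiText("gpt-5.2"),
    messages,
  });
  return toServerSentEventsResponse(stream);
}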
maxIterations() creates an agent loop strategy that caps the number of model/tool-call iterations in a single chat() call.
import { chat, maxIterations } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";

const stream = chat({
  adapter: openaiText("gpt-5.2"),
  messages: [...],
  agentLoopStrategy: maxIterations(20),
});
Returns an AgentLoopStrategy object.
interface ModelMessage {
  role: "user" | "assistant" | "system" | "tool";
  content: string;
  toolCallId?: string;
}
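For example, a conversation that includes a tool result keyed by toolCallId (the id and content values are illustrative):

const messages: ModelMessage[] = [
  { role: "system", content: "You are a helpful assistant" },
  { role: "user", content: "What's the weather in Paris?" },
  { role: "tool", content: '{"temperature":72}', toolCallId: "call_123" }, // illustrative id
];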
type StreamChunk =
  | ContentStreamChunk
  | ThinkingStreamChunk
  | ToolCallStreamChunk
  | ToolResultStreamChunk
  | DoneStreamChunk
  | ErrorStreamChunk;
interface ThinkingStreamChunk {
  type: "thinking";
  id: string;
  model: string;
  timestamp: number;
  delta?: string; // Incremental thinking token
  content: string; // Accumulated thinking content
}
Stream chunks represent the different kinds of data in the stream: text content, model thinking, tool calls, tool results, completion, and errors.
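A handling sketch; the "thinking" tag comes from ThinkingStreamChunk above, while the "done" and "error" tags are assumptions inferred from the union member names:

for await (const chunk of stream) {
  switch (chunk.type) {
    case "thinking":
      console.log("thinking:", chunk.content); // accumulated thinking content
      break;
    case "done": // assumed tag for DoneStreamChunk
      console.log("stream complete");
      break;
    case "error": // assumed tag for ErrorStreamChunk
      console.error("stream error");
      break;
  }
}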
interface Tool {
  type: "function";
  function: {
    name: string;
    description: string;
    parameters: Record<string, any>;
  };
  execute?: (args: any) => Promise<any> | any;
  needsApproval?: boolean;
}
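A Tool can also be written by hand against this interface. A sketch; treating parameters as a JSON Schema object is an assumption (toolDefinition() above derives it from a zod schema for you):

const getTimeTool: Tool = {
  type: "function",
  function: {
    name: "get_time",
    description: "Get the current ISO-8601 timestamp",
    parameters: { type: "object", properties: {} }, // assumed JSON Schema shape
  },
  execute: () => new Date().toISOString(),
};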
import { chat, summarize, generateImage } from "@tanstack/ai";
import {
  openaiText,
  openaiSummarize,
  openaiImage,
} from "@tanstack/ai-openai";
// --- Streaming chat
const stream = chat({
  adapter: openaiText("gpt-5.2"),
  messages: [{ role: "user", content: "Hello!" }],
});

// --- One-shot chat response (stream: false)
const response = await chat({
  adapter: openaiText("gpt-5.2"),
  messages: [{ role: "user", content: "What's the capital of France?" }],
  stream: false, // Returns a Promise<string> instead of AsyncIterable
});
// --- Structured response with outputSchema
import { z } from "zod";

const parsed = await chat({
  adapter: openaiText("gpt-5.2"),
  messages: [
    {
      role: "user",
      content: "Summarize this text in JSON with keys 'summary' and 'keywords': ...",
    },
  ],
  outputSchema: z.object({
    summary: z.string(),
    keywords: z.array(z.string()),
  }),
});
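// Consume the validated result (assumption: with outputSchema, chat()
// resolves to the parsed object rather than a stream):
console.log(parsed.summary, parsed.keywords.join(", "));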
// --- Structured response with tools
import { toolDefinition } from "@tanstack/ai";

const weatherTool = toolDefinition({
  name: "getWeather",
  description: "Get the current weather for a city",
  inputSchema: z.object({
    city: z.string().describe("City name"),
  }),
}).server(async ({ city }) => {
  // Stub implementation; a real tool would fetch live weather data
  return JSON.stringify({ temperature: 72, condition: "Sunny" });
});
const toolResult = await chat({
  adapter: openaiText("gpt-5.2"),
  messages: [
    { role: "user", content: "What's the weather in Paris?" },
  ],
  tools: [weatherTool],
  outputSchema: z.object({
    answer: z.string(),
    weather: z.object({
      temperature: z.number(),
      condition: z.string(),
    }),
  }),
});
// --- Summarization
const summary = await summarize({
  adapter: openaiSummarize("gpt-5.2"),
  text: "Long text to summarize...",
  maxLength: 100,
});

// --- Image generation
const image = await generateImage({
  adapter: openaiImage("dall-e-3"),
  prompt: "A futuristic city skyline at sunset",
  numberOfImages: 1,
  size: "1024x1024",
});