diff --git a/.circleci/config.yml b/.circleci/config.yml
index 95d0293f8c820..8e04d6d405805 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -47,7 +47,7 @@ default-job: &default-job
parameters:
<<: *default-parameters
environment:
- # expose it globally otherwise we have to thread it from each job to the install command
+ &default-environment # expose it globally otherwise we have to thread it from each job to the install command
BROWSERSTACK_FORCE: << pipeline.parameters.browserstack-force >>
COREPACK_ENABLE_DOWNLOAD_PROMPT: '0'
# Used by the code-infra orb to diff against the correct base branch for dedupe and prettier checks
@@ -140,6 +140,9 @@ jobs:
path: test-results
test_lint:
<<: *default-job
+ environment:
+ <<: *default-environment
+ NODE_OPTIONS: --max-old-space-size=4096
steps:
- checkout
- install-deps
diff --git a/docs/data/chat/ai-and-agents/reasoning/reasoning.md b/docs/data/chat/ai-and-agents/reasoning/reasoning.md
new file mode 100644
index 0000000000000..64cb3a3f6b68b
--- /dev/null
+++ b/docs/data/chat/ai-and-agents/reasoning/reasoning.md
@@ -0,0 +1,175 @@
+---
+productId: x-chat
+title: Reasoning
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+components: ChatMessageContent
+---
+
+# Chat - Reasoning
+
+
+<p class="description">Display the LLM's chain-of-thought or thinking trace using ChatReasoningMessagePart and the reasoning stream chunks.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+Many large language models expose a "thinking" or "reasoning" trace alongside their final response. The Chat component supports streaming and displaying this reasoning content through dedicated chunk types and a specialized message part.
+
+## `ChatReasoningMessagePart`
+
+When reasoning chunks arrive during streaming, the runtime creates a `ChatReasoningMessagePart` on the assistant message:
+
+```ts
+interface ChatReasoningMessagePart {
+ type: 'reasoning';
+ text: string;
+ state?: ChatMessagePartStatus; // 'streaming' | 'done'
+}
+```
+
+| Field | Type | Description |
+| :------ | :---------------------- | :-------------------------------------------- |
+| `type` | `'reasoning'` | Identifies this as a reasoning part |
+| `text` | `string` | The accumulated reasoning text |
+| `state` | `'streaming' \| 'done'` | Whether the reasoning is still being streamed |
+
+The `state` field transitions from `'streaming'` while deltas are arriving to `'done'` once the reasoning section is complete.
+
+## Reasoning stream chunks
+
+Reasoning content is streamed using a triplet of chunks, following the same pattern as text chunks:
+
+| Chunk type | Fields | Description |
+| :---------------- | :------------ | :-------------------------- |
+| `reasoning-start` | `id` | Begin a reasoning part |
+| `reasoning-delta` | `id`, `delta` | Append reasoning content |
+| `reasoning-end` | `id` | Finalize the reasoning part |
+
+### How chunks become parts
+
+1. `reasoning-start` creates a new `ChatReasoningMessagePart` with `state: 'streaming'`.
+2. `reasoning-delta` appends the `delta` text to the existing reasoning part.
+3. `reasoning-end` sets `state: 'done'`.
+
+Multiple `reasoning-delta` chunks are batched according to `streamFlushInterval` before being applied to the store, just like text deltas.
+
+### Streaming example
+
+```tsx
+const adapter: ChatAdapter = {
+ async sendMessage({ message }) {
+ return new ReadableStream({
+ start(controller) {
+ controller.enqueue({ type: 'start', messageId: 'msg-1' });
+
+ // Reasoning section
+ controller.enqueue({ type: 'reasoning-start', id: 'reasoning-1' });
+ controller.enqueue({
+ type: 'reasoning-delta',
+ id: 'reasoning-1',
+ delta: 'The user is asking about weather in Paris. ',
+ });
+ controller.enqueue({
+ type: 'reasoning-delta',
+ id: 'reasoning-1',
+ delta: 'I should check the current forecast data.',
+ });
+ controller.enqueue({ type: 'reasoning-end', id: 'reasoning-1' });
+
+ // Text response
+ controller.enqueue({ type: 'text-start', id: 'text-1' });
+ controller.enqueue({
+ type: 'text-delta',
+ id: 'text-1',
+ delta: 'The weather in Paris is currently 22 degrees and sunny.',
+ });
+ controller.enqueue({ type: 'text-end', id: 'text-1' });
+
+ controller.enqueue({ type: 'finish', messageId: 'msg-1' });
+ controller.close();
+ },
+ });
+ },
+};
+```
+
+## Displaying reasoning in a collapsible section
+
+Reasoning content is typically displayed in a collapsible section above the main response text. Register a custom renderer for reasoning parts to control the presentation:
+
+```tsx
+import Accordion from '@mui/material/Accordion';
+import AccordionSummary from '@mui/material/AccordionSummary';
+import AccordionDetails from '@mui/material/AccordionDetails';
+import ExpandMoreIcon from '@mui/icons-material/ExpandMore';
+import Typography from '@mui/material/Typography';
+
+const renderers: ChatPartRendererMap = {
+ reasoning: ({ part }) => (
+    <Accordion>
+      <AccordionSummary expandIcon={<ExpandMoreIcon />}>
+        <Typography>
+          {part.state === 'streaming' ? 'Thinking...' : 'Reasoning'}
+        </Typography>
+      </AccordionSummary>
+      <AccordionDetails>
+        <Typography variant="body2">
+          {part.text}
+        </Typography>
+      </AccordionDetails>
+    </Accordion>
+ ),
+};
+
+<ChatProvider adapter={adapter} partRenderers={renderers}>
+  <ChatBox />
+</ChatProvider>;
+```
+
+## Show/hide configuration
+
+Control whether reasoning is visible to the user by filtering parts in your renderer. You can use a prop, a context value, or application state to toggle visibility:
+
+```tsx
+function ReasoningPart({ part, showReasoning }) {
+ if (!showReasoning) return null;
+
+ return (
+    <ReasoningContent part={part} />
+ );
+}
+
+// Register with a configurable toggle
+const renderers: ChatPartRendererMap = {
+ reasoning: ({ part }) => (
+    <ReasoningPart part={part} showReasoning={showReasoning} />
+ ),
+};
+```
+
+## Reasoning alongside tool calls
+
+Reasoning chunks can appear before, between, or after tool invocations in the same stream. The runtime handles interleaving correctly — each chunk type creates its own message part in the order it arrives:
+
+```tsx
+// Stream order:
+// 1. reasoning-start -> reasoning-delta -> reasoning-end (thinking)
+// 2. tool-input-start -> tool-input-available (tool call)
+// 3. reasoning-start -> reasoning-delta -> reasoning-end (thinking about result)
+// 4. tool-output-available (tool result)
+// 5. text-start -> text-delta -> text-end (final answer)
+```
+
+This produces a message with five parts in order: reasoning, tool, reasoning, tool (updated), text.
+
+## See also
+
+- [Tool Calling](/x/react-chat/ai-and-agents/tool-calling/) for the tool invocation lifecycle.
+- [Step Tracking](/x/react-chat/ai-and-agents/step-tracking/) for multi-step agent progress tracking.
+- [Streaming](/x/react-chat/behavior/streaming/) for the full chunk protocol reference including reasoning chunks.
diff --git a/docs/data/chat/ai-and-agents/step-tracking/step-tracking.md b/docs/data/chat/ai-and-agents/step-tracking/step-tracking.md
new file mode 100644
index 0000000000000..ba527ed3547e1
--- /dev/null
+++ b/docs/data/chat/ai-and-agents/step-tracking/step-tracking.md
@@ -0,0 +1,246 @@
+---
+productId: x-chat
+title: Step Tracking
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+components: ChatMessageContent
+---
+
+# Chat - Step Tracking
+
+
+<p class="description">Track multi-step agent progress using start-step and finish-step stream chunks that create visual delimiters in the message.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+Agentic AI workflows often involve multiple processing steps — reasoning, tool calls, intermediate results, and a final answer. Step tracking lets you visually delimit these phases in the message stream so users can follow the agent's progress.
+
+## Step boundary chunks
+
+The streaming protocol provides two chunks for step boundaries:
+
+| Chunk type | Description |
+| :------------ | :-------------------------- |
+| `start-step` | Begin a new processing step |
+| `finish-step` | End the current step |
+
+```ts
+interface ChatStartStepChunk {
+ type: 'start-step';
+}
+
+interface ChatFinishStepChunk {
+ type: 'finish-step';
+}
+```
+
+## `ChatStepStartMessagePart`
+
+When a `start-step` chunk arrives, the runtime creates a `ChatStepStartMessagePart` on the assistant message:
+
+```ts
+interface ChatStepStartMessagePart {
+ type: 'step-start';
+}
+```
+
+The `finish-step` chunk signals the end of the current step but does not create a separate message part — it serves as a boundary marker in the stream.
+
+## Streaming example
+
+A typical agentic loop might produce multiple steps, each containing reasoning, tool calls, or text:
+
+```tsx
+const adapter: ChatAdapter = {
+ async sendMessage({ message }) {
+ return new ReadableStream({
+ start(controller) {
+ controller.enqueue({ type: 'start', messageId: 'msg-1' });
+
+ // Step 1: Search for information
+ controller.enqueue({ type: 'start-step' });
+ controller.enqueue({
+ type: 'tool-input-start',
+ toolCallId: 'call-1',
+ toolName: 'search',
+ });
+ controller.enqueue({
+ type: 'tool-input-available',
+ toolCallId: 'call-1',
+ toolName: 'search',
+ input: { query: 'MUI X Chat documentation' },
+ });
+ controller.enqueue({
+ type: 'tool-output-available',
+ toolCallId: 'call-1',
+ output: { results: ['...'] },
+ });
+ controller.enqueue({ type: 'finish-step' });
+
+ // Step 2: Analyze results
+ controller.enqueue({ type: 'start-step' });
+ controller.enqueue({
+ type: 'tool-input-start',
+ toolCallId: 'call-2',
+ toolName: 'analyze',
+ });
+ controller.enqueue({
+ type: 'tool-input-available',
+ toolCallId: 'call-2',
+ toolName: 'analyze',
+ input: { data: '...' },
+ });
+ controller.enqueue({
+ type: 'tool-output-available',
+ toolCallId: 'call-2',
+ output: { summary: '...' },
+ });
+ controller.enqueue({ type: 'finish-step' });
+
+ // Step 3: Final answer
+ controller.enqueue({ type: 'start-step' });
+ controller.enqueue({ type: 'text-start', id: 'text-1' });
+ controller.enqueue({
+ type: 'text-delta',
+ id: 'text-1',
+ delta: 'Based on my research, here is the answer...',
+ });
+ controller.enqueue({ type: 'text-end', id: 'text-1' });
+ controller.enqueue({ type: 'finish-step' });
+
+ controller.enqueue({ type: 'finish', messageId: 'msg-1' });
+ controller.close();
+ },
+ });
+ },
+};
+```
+
+## Displaying step progress
+
+The `step-start` parts act as delimiters in the message's `parts` array. You can render them as visual separators, progress indicators, or collapsible sections.
+
+### Step delimiter renderer
+
+Register a custom renderer for `step-start` parts:
+
+```tsx
+const renderers: ChatPartRendererMap = {
+ 'step-start': ({ index, message }) => {
+ const stepNumber = message.parts
+ .slice(0, index + 1)
+ .filter((part) => part.type === 'step-start').length;
+ return (
+      <div className="step-delimiter">
+        <span>
+          Step {stepNumber}
+        </span>
+      </div>
+ );
+ },
+};
+
+<ChatProvider adapter={adapter} partRenderers={renderers}>
+  <ChatBox />
+</ChatProvider>;
+```
+
+### Step progress with Material UI
+
+For a more polished UI, use Material UI components to show step progress:
+
+```tsx
+import Divider from '@mui/material/Divider';
+import Typography from '@mui/material/Typography';
+
+const renderers: ChatPartRendererMap = {
+ 'step-start': ({ index, message }) => {
+ const stepNumber = message.parts
+ .slice(0, index + 1)
+ .filter((part) => part.type === 'step-start').length;
+ return (
+      <Divider>
+        <Typography variant="caption">
+          Step {stepNumber}
+        </Typography>
+      </Divider>
+ );
+ },
+};
+```
+
+## Step structure in the parts array
+
+After streaming, the message's `parts` array contains `step-start` entries interleaved with the content parts for each step:
+
+```ts
+// Example parts array after streaming
+[
+ { type: 'step-start' }, // Step 1 delimiter
+ {
+ type: 'tool',
+ toolInvocation: {
+ /* ... */
+ },
+ }, // Step 1 content
+ { type: 'step-start' }, // Step 2 delimiter
+ {
+ type: 'tool',
+ toolInvocation: {
+ /* ... */
+ },
+ }, // Step 2 content
+ { type: 'step-start' }, // Step 3 delimiter
+ { type: 'text', text: 'Final answer...' }, // Step 3 content
+];
+```
+
+This structure makes it straightforward to group parts by step when building custom renderers. Iterate through the parts and treat each `step-start` as a new group boundary.
+
+## Steps with reasoning and tool calls
+
+Steps compose naturally with reasoning and tool calling. A single step can contain reasoning, one or more tool invocations, and text:
+
+```tsx
+// Step with reasoning + tool call
+controller.enqueue({ type: 'start-step' });
+controller.enqueue({ type: 'reasoning-start', id: 'r-1' });
+controller.enqueue({
+ type: 'reasoning-delta',
+ id: 'r-1',
+ delta: 'I need to look up the user data first.',
+});
+controller.enqueue({ type: 'reasoning-end', id: 'r-1' });
+controller.enqueue({
+ type: 'tool-input-start',
+ toolCallId: 'call-1',
+ toolName: 'get_user',
+});
+controller.enqueue({
+ type: 'tool-input-available',
+ toolCallId: 'call-1',
+ toolName: 'get_user',
+ input: { userId: '123' },
+});
+controller.enqueue({
+ type: 'tool-output-available',
+ toolCallId: 'call-1',
+ output: { name: 'Alice', email: 'alice@example.com' },
+});
+controller.enqueue({ type: 'finish-step' });
+```
+
+## See also
+
+- [Tool Calling](/x/react-chat/ai-and-agents/tool-calling/) for the tool invocation lifecycle within steps.
+- [Reasoning](/x/react-chat/ai-and-agents/reasoning/) for displaying LLM thinking traces.
+- [Streaming](/x/react-chat/behavior/streaming/) for the full chunk protocol reference including step boundary chunks.
+- [Tool Approval](/x/react-chat/ai-and-agents/tool-approval/) for human-in-the-loop checkpoints within agent steps.
diff --git a/docs/data/chat/ai-and-agents/tool-approval/tool-approval.md b/docs/data/chat/ai-and-agents/tool-approval/tool-approval.md
new file mode 100644
index 0000000000000..50331555c98d3
--- /dev/null
+++ b/docs/data/chat/ai-and-agents/tool-approval/tool-approval.md
@@ -0,0 +1,225 @@
+---
+productId: x-chat
+title: Tool Approval
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+components: ChatConfirmation
+---
+
+# Chat - Tool Approval
+
+
+<p class="description">Add human-in-the-loop checkpoints before the agent executes tool calls, using the approval lifecycle and the ChatConfirmation UI component.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+Tool approval lets you pause the agent when it requests a potentially dangerous action, present the user with an approve/deny interface, and resume or cancel the tool execution based on the user's decision.
+
+## Approval workflow
+
+The approval lifecycle extends the standard tool invocation states with two additional phases:
+
+| State | Description |
+| :------------------- | :-------------------------------------- |
+| `input-streaming` | Tool input JSON is being streamed |
+| `input-available` | Tool input is fully available |
+| `approval-requested` | Stream pauses — user approval is needed |
+| `approval-responded` | User has responded, stream continues |
+| `output-available` | Tool output is ready (if approved) |
+| `output-denied` | User denied the tool call |
+
+The stream pauses at `approval-requested` until your UI calls `addToolApprovalResponse()`.
+
+## Stream chunk: `tool-approval-request`
+
+When the backend determines a tool call needs user approval, it sends a `tool-approval-request` chunk:
+
+```ts
+controller.enqueue({
+ type: 'tool-approval-request',
+ toolCallId: 'call-1',
+ toolName: 'delete_files',
+ input: { path: '/tmp/data', recursive: true },
+ approvalId: 'approval-1', // optional, defaults to toolCallId
+});
+```
+
+| Field | Type | Description |
+| :----------- | :-------------------- | :----------------------------------- |
+| `toolCallId` | `string` | The tool invocation being gated |
+| `toolName` | `string` | Name of the tool requesting approval |
+| `input` | `object` | The tool's input, shown to the user |
+| `approvalId` | `string \| undefined` | Optional distinct approval ID |
+
+When this chunk arrives, the tool invocation moves to `state: 'approval-requested'`.
+
+## The `addToolApprovalResponse` adapter method
+
+Implement `addToolApprovalResponse` on your adapter to send the user's decision to the backend:
+
+```ts
+interface ChatAddToolApproveResponseInput {
+ id: string; // the approval request ID from the stream chunk
+ approved: boolean; // true = approved, false = denied
+ reason?: string; // optional reason surfaced to the model when denied
+}
+```
+
+```tsx
+async addToolApprovalResponse({ id, approved, reason }) {
+ await fetch('/api/tool-approval', {
+ method: 'POST',
+ body: JSON.stringify({ id, approved, reason }),
+ });
+},
+```
+
+## Approve/deny flow in the UI
+
+Use `useChat()` to call `addToolApprovalResponse` from your component:
+
+```tsx
+const { addToolApprovalResponse } = useChat();
+
+// Approve
+await addToolApprovalResponse({
+ id: toolCall.toolCallId,
+ approved: true,
+});
+
+// Deny with reason
+await addToolApprovalResponse({
+ id: toolCall.toolCallId,
+ approved: false,
+ reason: 'User declined the operation',
+});
+```
+
+After responding, the tool invocation moves to `state: 'approval-responded'`, and the stream continues. If approved, the tool proceeds to execution and eventually reaches `output-available`. If denied, the tool moves to `output-denied`.
+
+## `ChatConfirmation` UI component
+
+`ChatConfirmation` renders a prominent warning card with a message and two action buttons for human-in-the-loop checkpoints:
+
+```tsx
+import { ChatConfirmation } from '@mui/x-chat';
+
+<ChatConfirmation
+  message="Are you sure you want to proceed?"
+  onConfirm={() => agent.confirm()}
+ onCancel={() => agent.cancel()}
+/>;
+```
+
+### Custom labels
+
+Use `confirmLabel` and `cancelLabel` to tailor the button text to the action:
+
+```tsx
+<ChatConfirmation confirmLabel="Delete files" cancelLabel="Keep files" />
+```
+
+### Connecting to the adapter
+
+Hold the card visibility in `React.useState`. Show the card when the agent triggers a confirmation step, and hide it once the user responds:
+
+```tsx
+const [pendingConfirmation, setPendingConfirmation] = React.useState(false);
+
+const setConfirmRef = React.useRef(setPendingConfirmation);
+setConfirmRef.current = setPendingConfirmation;
+
+const adapter = React.useMemo(
+ () => ({
+ async sendMessage({ message }) {
+ // ... stream agent response ...
+ // Signal that a confirmation is needed:
+ setConfirmRef.current(true);
+ return responseStream;
+ },
+ }),
+ [],
+);
+
+// In your JSX:
+{
+ pendingConfirmation && (
+    <ChatConfirmation onConfirm={() => {
+ setPendingConfirmation(false);
+ /* approve the tool */
+ }}
+ onCancel={() => setPendingConfirmation(false)}
+ />
+ );
+}
+```
+
+## Relationship to tool-call approval
+
+The built-in tool part `approval-requested` state handles the narrow case of approving a specific tool call — it renders inside the collapsible tool widget. `ChatConfirmation` is a broader, more prominent pattern for any "human-in-the-loop" checkpoint that does not require a structured tool invocation.
+
+Use the stream-based `tool-approval-request` when:
+
+- The approval is tied to a specific tool invocation
+- You want the approval UI inline within the message
+- The backend needs to resume the stream after approval
+
+Use `ChatConfirmation` when:
+
+- You need a prominent, page-level confirmation dialog
+- The confirmation is not tied to a specific tool call
+- You want full control over the confirmation UI and flow
+
+## Registering custom renderers for tool approval
+
+Register a renderer that handles the `approval-requested` state inside tool parts:
+
+```tsx
+const renderers: ChatPartRendererMap = {
+ tool: ({ part, message, index }) => {
+ const { toolInvocation } = part;
+
+ if (toolInvocation.state === 'approval-requested') {
+ return (
+        <ToolApprovalPrompt
+          onApprove={() =>
+ addToolApprovalResponse({
+ id: toolInvocation.toolCallId,
+ approved: true,
+ })
+ }
+ onDeny={(reason) =>
+ addToolApprovalResponse({
+ id: toolInvocation.toolCallId,
+ approved: false,
+ reason,
+ })
+ }
+ />
+ );
+ }
+
+    return <DefaultToolRenderer part={part} message={message} index={index} />;
+ },
+};
+
+<ChatProvider adapter={adapter} partRenderers={renderers}>
+  <ChatBox />
+</ChatProvider>;
+```
+
+## See also
+
+- [Tool Calling](/x/react-chat/ai-and-agents/tool-calling/) for the full tool invocation lifecycle and chunk protocol.
+- [Adapter](/x/react-chat/backend/adapters/) for the `addToolApprovalResponse()` method reference.
+- [Streaming](/x/react-chat/behavior/streaming/) for the `tool-approval-request` chunk type.
+- [Reasoning](/x/react-chat/ai-and-agents/reasoning/) for displaying LLM thinking alongside tool calls.
diff --git a/docs/data/chat/ai-and-agents/tool-calling/tool-calling.md b/docs/data/chat/ai-and-agents/tool-calling/tool-calling.md
new file mode 100644
index 0000000000000..0c1d8408d7732
--- /dev/null
+++ b/docs/data/chat/ai-and-agents/tool-calling/tool-calling.md
@@ -0,0 +1,241 @@
+---
+productId: x-chat
+title: Tool Calling
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+components: ChatMessageContent
+---
+
+# Chat - Tool Calling
+
+
+<p class="description">Stream tool invocations from the LLM, track their lifecycle through well-defined states, and render custom tool UIs using the part renderer registry.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+Tool calling lets an AI assistant invoke external functions during a conversation. The runtime handles the full tool lifecycle: streaming tool input, making the input available, executing the tool, and displaying the output — all through the streaming chunk protocol.
+
+## `ChatToolMessagePart`
+
+When a tool is invoked during streaming, the runtime creates a `ChatToolMessagePart` on the assistant message:
+
+```ts
+interface ChatToolMessagePart<
+ TToolName extends ChatKnownToolName = ChatKnownToolName,
+> {
+ type: 'tool';
+  toolInvocation: ChatToolInvocation<TToolName>;
+}
+```
+
+Each tool message part wraps a `ChatToolInvocation` that tracks the tool's lifecycle:
+
+```ts
+interface ChatToolInvocation<
+ TToolName extends ChatKnownToolName = ChatKnownToolName,
+> {
+ toolCallId: string;
+ toolName: TToolName;
+ state: ChatToolInvocationState;
+ input?: ChatToolInput;
+ output?: ChatToolOutput;
+ errorText?: string;
+ approval?: ChatToolApproval;
+ providerExecuted?: boolean;
+ title?: string;
+  callProviderMetadata?: Record<string, unknown>;
+ preliminary?: boolean;
+}
+```
+
+## Tool invocation states
+
+The `toolInvocation.state` field tracks the tool lifecycle through well-defined states:
+
+| State | Description |
+| :------------------- | :----------------------------------------- |
+| `input-streaming` | Tool input JSON is being streamed |
+| `input-available` | Tool input is fully available |
+| `approval-requested` | User approval is needed before execution |
+| `approval-responded` | User has responded to the approval request |
+| `output-available` | Tool output is ready |
+| `output-error` | Tool execution failed |
+| `output-denied` | User denied the tool call |
+
+The typical progression is: `input-streaming` -> `input-available` -> `output-available`. When human-in-the-loop approval is required, the flow includes `approval-requested` -> `approval-responded` between input and output.
+
+## Stream chunk protocol
+
+Tool chunks in the streaming protocol drive the state transitions:
+
+| Chunk type | Fields | Description |
+| :---------------------- | :------------------------------------- | :---------------------------- |
+| `tool-input-start` | `toolCallId`, `toolName`, `dynamic?` | Begin a tool invocation |
+| `tool-input-delta` | `toolCallId`, `inputTextDelta` | Stream tool input JSON |
+| `tool-input-available` | `toolCallId`, `toolName`, `input` | Tool input is fully available |
+| `tool-input-error` | `toolCallId`, `errorText` | Tool input parsing failed |
+| `tool-output-available` | `toolCallId`, `output`, `preliminary?` | Tool output is available |
+| `tool-output-error` | `toolCallId`, `errorText` | Tool execution failed |
+| `tool-output-denied` | `toolCallId`, `reason?` | User denied the tool call |
+
+### Tool input streaming
+
+Tool input is streamed incrementally as JSON. The `tool-input-start` chunk begins the invocation with the tool name, `tool-input-delta` chunks append partial JSON, and `tool-input-available` delivers the complete parsed input:
+
+```tsx
+const adapter: ChatAdapter = {
+ async sendMessage({ message }) {
+ return new ReadableStream({
+ start(controller) {
+ controller.enqueue({ type: 'start', messageId: 'msg-1' });
+
+ // Tool input streaming
+ controller.enqueue({
+ type: 'tool-input-start',
+ toolCallId: 'call-1',
+ toolName: 'get_weather',
+ });
+ controller.enqueue({
+ type: 'tool-input-delta',
+ toolCallId: 'call-1',
+ inputTextDelta: '{"city":',
+ });
+ controller.enqueue({
+ type: 'tool-input-delta',
+ toolCallId: 'call-1',
+ inputTextDelta: '"Paris"}',
+ });
+ controller.enqueue({
+ type: 'tool-input-available',
+ toolCallId: 'call-1',
+ toolName: 'get_weather',
+ input: { city: 'Paris' },
+ });
+
+ // Tool output
+ controller.enqueue({
+ type: 'tool-output-available',
+ toolCallId: 'call-1',
+ output: { temperature: 22, condition: 'sunny' },
+ });
+
+ controller.enqueue({ type: 'finish', messageId: 'msg-1' });
+ controller.close();
+ },
+ });
+ },
+};
+```
+
+## The `onToolCall` callback
+
+Register `onToolCall` on `ChatProvider` to observe every tool invocation state change during streaming:
+
+```tsx
+<ChatProvider
+  adapter={adapter}
+  onToolCall={({ toolCall }) => {
+ console.log(`Tool "${toolCall.toolName}" is now ${toolCall.state}`);
+
+ if (toolCall.state === 'output-available') {
+ // Drive side effects — update dashboards, trigger notifications, etc.
+ }
+ }}
+>
+  <ChatBox />
+</ChatProvider>
+```
+
+The callback fires on every state change — not just when output is available. Use it for side effects outside the store: logging, analytics, and external API calls.
+
+### The `ChatOnToolCallPayload`
+
+```ts
+interface ChatOnToolCallPayload {
+ toolCall: ChatToolInvocation | ChatDynamicToolInvocation;
+}
+```
+
+The `toolCall` object includes `toolCallId`, `toolName`, `state`, `input`, `output`, `errorText`, and `approval` fields — all typed based on your `ChatToolDefinitionMap` augmentation.
+
+## Tool type registry
+
+Use TypeScript module augmentation to register typed tool definitions. This gives you type-safe `input` and `output` on tool invocations:
+
+```ts
+declare module '@mui/x-chat/types' {
+ interface ChatToolDefinitionMap {
+ get_weather: {
+ input: { city: string };
+ output: { temperature: number; condition: string };
+ };
+ search_docs: {
+ input: { query: string; limit?: number };
+ output: { results: Array<{ title: string; url: string }> };
+ };
+ }
+}
+```
+
+Once registered, `ChatToolInvocation<'get_weather'>` correctly types `input` as `{ city: string }` and `output` as `{ temperature: number; condition: string }`.
+
+## Dynamic tools
+
+For tools that are not known at compile time, use `ChatDynamicToolMessagePart` with the `dynamic: true` flag on the `tool-input-start` chunk:
+
+```ts
+controller.enqueue({
+ type: 'tool-input-start',
+ toolCallId: 'call-2',
+ toolName: 'user_defined_tool',
+ dynamic: true,
+});
+```
+
+Dynamic tool invocations use `ChatDynamicToolInvocation` with untyped `input` and `output` (`unknown`):
+
+```ts
+interface ChatDynamicToolInvocation<TToolName extends string = string> {
+ toolCallId: string;
+ toolName: TToolName;
+ state: ChatToolInvocationState;
+ input?: unknown;
+ output?: unknown;
+ errorText?: string;
+ approval?: ChatToolApproval;
+  callProviderMetadata?: Record<string, unknown>;
+}
+```
+
+## Rendering tool parts
+
+Register custom renderers for tool parts through the `partRenderers` prop on `ChatProvider`:
+
+```tsx
+const renderers: ChatPartRendererMap = {
+  tool: ({ part, message, index }) => <MyToolCard part={part} message={message} index={index} />,
+};
+
+<ChatProvider adapter={adapter} partRenderers={renderers}>
+  <ChatBox />
+</ChatProvider>;
+```
+
+Use `useChatPartRenderer('tool')` inside any component to look up the registered renderer:
+
+```tsx
+function MessagePart({ part, message, index }) {
+ const renderer = useChatPartRenderer(part.type);
+
+ if (renderer) {
+ return renderer({ part, message, index });
+ }
+
+ return null;
+}
+```
+
+## See also
+
+- [Tool Approval](/x/react-chat/ai-and-agents/tool-approval/) for human-in-the-loop approval of tool calls.
+- [Streaming](/x/react-chat/behavior/streaming/) for the full stream chunk protocol reference.
+- [Step Tracking](/x/react-chat/ai-and-agents/step-tracking/) for multi-step agent progress tracking.
diff --git a/docs/data/chat/backend/adapters/adapters.md b/docs/data/chat/backend/adapters/adapters.md
new file mode 100644
index 0000000000000..939b0f8f3a1ba
--- /dev/null
+++ b/docs/data/chat/backend/adapters/adapters.md
@@ -0,0 +1,309 @@
+---
+productId: x-chat
+title: Adapters
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+---
+
+# Chat - Adapters
+
+<p class="description">The ChatAdapter interface is the single contract between your backend and the chat runtime. This page is the full interface reference.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+`ChatBox` renders a fully styled chat surface, but it knows nothing about your backend on its own.
+The adapter is the single object that bridges them.
+It receives user messages, communicates with your server, and returns a streaming response that the runtime turns into live UI updates.
+
+## The `ChatAdapter` interface
+
+The full interface is generic over your pagination cursor type.
+The default cursor type is `string`, which covers the majority of REST and cursor-based APIs:
+
+```ts
+import type { ChatAdapter } from '@mui/x-chat/headless';
+
+interface ChatAdapter<Cursor = string> {
+  // Required
+  sendMessage(
+    input: ChatSendMessageInput,
+  ): Promise<ReadableStream<ChatStreamChunk>>;
+
+  // Optional — implement as your feature set grows
+  listConversations?(
+    input?: ChatListConversationsInput<Cursor>,
+  ): Promise<ChatListConversationsResult<Cursor>>;
+  listMessages?(
+    input: ChatListMessagesInput<Cursor>,
+  ): Promise<ChatListMessagesResult<Cursor>>;
+  reconnectToStream?(
+    input: ChatReconnectToStreamInput,
+  ): Promise<ReadableStream<ChatStreamChunk> | null>;
+  setTyping?(input: ChatSetTypingInput): Promise<void>;
+  markRead?(input: ChatMarkReadInput): Promise<void>;
+  subscribe?(
+    input: ChatSubscribeInput,
+  ): Promise<ChatSubscriptionCleanup> | ChatSubscriptionCleanup;
+  addToolApprovalResponse?(input: ChatAddToolApproveResponseInput): Promise<void>;
+  stop?(): void;
+}
+```
+
+Only `sendMessage` is required.
+Every other method is optional and incrementally adopted — start with just `sendMessage` and add methods as your product grows.
+
+## Required: `sendMessage`
+
+`sendMessage` is the heart of the adapter.
+It is called every time a user submits a message in the composer.
+
+```ts
+interface ChatSendMessageInput {
+ conversationId?: string; // the active conversation ID
+ message: ChatMessage; // the user's message being sent
+ messages: ChatMessage[]; // full thread context for the model
+ attachments?: ChatDraftAttachment[]; // file attachments from the composer
+  metadata?: Record<string, unknown>; // arbitrary context from the composer
+ signal: AbortSignal; // connected to the stop button
+}
+```
+
+The method must return a `Promise<ReadableStream<ChatStreamChunk>>`.
+The runtime reads this stream, processes each chunk type, and updates the UI live.
+
+### Streaming protocol
+
+The stream must begin with a `start` chunk and end with `finish` or `abort`.
+Text arrives in `text-start` / `text-delta` / `text-end` triplets:
+
+```tsx
+const adapter: ChatAdapter = {
+ async sendMessage({ message }) {
+ return new ReadableStream({
+ start(controller) {
+ controller.enqueue({ type: 'start', messageId: 'msg-1' });
+ controller.enqueue({ type: 'text-start', id: 'text-1' });
+ controller.enqueue({ type: 'text-delta', id: 'text-1', delta: 'Hello!' });
+ controller.enqueue({ type: 'text-end', id: 'text-1' });
+ controller.enqueue({ type: 'finish', messageId: 'msg-1' });
+ controller.close();
+ },
+ });
+ },
+};
+```
+
+For the full chunk type reference, see [Streaming](/x/react-chat/behavior/streaming/).
+
+### Abort signal
+
+`input.signal` is an `AbortSignal` that fires when the user clicks the stop button.
+Pass it to your `fetch` call so the HTTP request is cancelled automatically:
+
+```tsx
+async sendMessage({ message, signal }) {
+ const res = await fetch('/api/chat', {
+ method: 'POST',
+ body: JSON.stringify({ message }),
+ signal, // browser cancels the request when the user stops
+ });
+ return res.body!;
+},
+```
+
+If your backend requires explicit cancellation (for example, sending a separate cancel request), implement the optional `stop()` method alongside the signal.
+
+## Optional methods
+
+The optional methods are listed roughly in the order you are likely to add them.
+None are required — the runtime detects which methods exist and activates the corresponding features automatically.
+
+### `listConversations(input?)`
+
+Implement this to populate the conversation sidebar when `ChatBox` mounts.
+The runtime calls it once on startup, before any user interaction.
+
+```ts
+interface ChatListConversationsInput<Cursor = string> {
+  cursor?: Cursor; // for paginated conversation lists
+  query?: string; // optional search query
+}
+
+interface ChatListConversationsResult<Cursor = string> {
+  conversations: ChatConversation[];
+  cursor?: Cursor; // next page cursor, if applicable
+  hasMore?: boolean; // whether additional pages exist
+}
+```
+
+### `listMessages(input)`
+
+Implement this to load message history when the user opens a conversation.
+The runtime calls it whenever `activeConversationId` changes to a conversation that has no messages in the store yet.
+
+```ts
+interface ChatListMessagesInput<Cursor = string> {
+  conversationId: string;
+  cursor?: Cursor;
+  direction?: 'forward' | 'backward'; // default: 'backward' (newest first)
+}
+
+interface ChatListMessagesResult<Cursor = string> {
+  messages: ChatMessage[];
+  cursor?: Cursor;
+  hasMore?: boolean;
+}
+```
+
+When `hasMore` is `true`, `ChatBox` shows a "Load earlier messages" control that calls `listMessages` again with the previous cursor.
+
+### `reconnectToStream(input)`
+
+Implement this to resume an interrupted stream — for example, when an SSE connection drops mid-response.
+The runtime calls it automatically after detecting a disconnected stream.
+
+```ts
+interface ChatReconnectToStreamInput {
+ conversationId?: string;
+ messageId?: string; // the message being streamed when the disconnect happened
+ signal: AbortSignal;
+}
+```
+
+Return `null` if the interrupted message cannot be resumed.
+
+### `setTyping(input)`
+
+Implement this to send a typing indicator to your backend when the user is composing a message.
+The runtime calls it when the composer value changes from empty to non-empty (and vice versa).
+
+```ts
+interface ChatSetTypingInput {
+ conversationId: string;
+ isTyping: boolean;
+}
+```
+
+To receive typing indicators from other users in the UI, implement `subscribe()` and emit `typing` events through the `onEvent` callback.
+
+### `markRead(input)`
+
+Implement this to signal to your backend that the user has seen a conversation or a specific message.
+The runtime does not call this automatically — call `adapter.markRead()` directly from your own UI event handler.
+
+```ts
+interface ChatMarkReadInput {
+ conversationId: string;
+ messageId?: string; // mark all messages up to this one as read
+}
+```
+
+### `subscribe(input)`
+
+Implement this to receive real-time events pushed from your backend — new messages, typing indicators, read receipts, or conversation updates.
+The runtime calls `subscribe()` on mount and invokes the returned cleanup function on unmount.
+
+```ts
+interface ChatSubscribeInput {
+ onEvent: (event: ChatRealtimeEvent) => void;
+}
+
+type ChatSubscriptionCleanup = () => void;
+```
+
+The cleanup function can also be returned from a `Promise` for async subscription setups.
+
+For the full list of realtime event types, see [Real-Time Adapters](/x/react-chat/backend/real-time-adapters/).
+
+### `addToolApprovalResponse(input)`
+
+Implement this when your backend supports human-in-the-loop tool confirmation.
+The runtime calls it when the user approves or denies a tool call that was flagged with a `tool-approval-request` stream chunk.
+
+```ts
+interface ChatAddToolApproveResponseInput {
+ id: string; // the approval request ID from the stream chunk
+ approved: boolean; // true = approved, false = denied
+ reason?: string; // optional reason surfaced to the model when denied
+}
+```
+
+### `stop()`
+
+Implement this when aborting the `signal` is not sufficient for server-side cleanup.
+The runtime calls `stop()` at the same moment the abort signal fires.
+
+```tsx
+stop() {
+ fetch('/api/chat/cancel', { method: 'POST' });
+},
+```
+
+Most adapters do not need `stop()` — passing `signal` to `fetch` is enough for HTTP-based transports.
+
+## Cursor generics
+
+`ChatAdapter` is generic over the type used for pagination cursors.
+The default is `string`, which covers opaque server cursors, Base64 tokens, and ISO timestamps.
+
+If your API uses a structured cursor, provide the type at the call site:
+
+```ts
+interface MyCursor {
+ page: number;
+ token: string;
+}
+
+const adapter: ChatAdapter<MyCursor> = {
+ async sendMessage(input) {
+ /* ... */
+ },
+
+ async listMessages({ cursor }) {
+ // cursor is typed as MyCursor | undefined here
+ const page = cursor?.page ?? 1;
+ const res = await fetch(`/api/messages?page=${page}`);
+ const { messages, nextPage, token } = await res.json();
+ return {
+ messages,
+ cursor: { page: nextPage, token },
+ hasMore: !!nextPage,
+ };
+ },
+};
+```
+
+The cursor type flows automatically through `ChatBox`, the store, hooks, and all adapter input and output types.
+
+## Error handling
+
+:::info
+You do not need to catch errors inside adapter methods — the runtime handles them for you.
+:::
+
+When an adapter method throws, the runtime:
+
+1. Records a `ChatError` with the appropriate `source` field (`'send'`, `'stream'`, `'history'`, or `'adapter'`).
+2. Surfaces it through `ChatBox`'s built-in error UI, `useChat().error`, and the `onError` callback on `ChatBox`.
+3. Marks the error `recoverable` when applicable (for example, stream disconnects) and `retryable` when the user can reasonably try again.
+
+If you want to transform or enrich an error before the runtime sees it, throw a plain `Error` with a custom message.
+The runtime wraps it in a `ChatError` with source `'adapter'`.
+
+To handle errors at the application level, use the `onError` callback prop:
+
+```tsx
+<ChatBox
+  adapter={adapter}
+  onError={(error) => {
+    console.error('[Chat error]', error.source, error.message);
+  }}
+/>
+```
+
+## See also
+
+- [Building an Adapter](/x/react-chat/backend/building-an-adapter/) for a step-by-step tutorial.
+- [Real-Time Adapters](/x/react-chat/backend/real-time-adapters/) for the event types used by `subscribe()`.
+- [Streaming](/x/react-chat/behavior/streaming/) for the full stream chunk protocol reference.
+- [Hooks Reference](/x/react-chat/resources/hooks/) to see which runtime actions trigger adapter methods.
diff --git a/docs/data/chat/backend/building-an-adapter/building-an-adapter.md b/docs/data/chat/backend/building-an-adapter/building-an-adapter.md
new file mode 100644
index 0000000000000..d687985ce2b16
--- /dev/null
+++ b/docs/data/chat/backend/building-an-adapter/building-an-adapter.md
@@ -0,0 +1,286 @@
+---
+productId: x-chat
+title: Building an Adapter
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+---
+
+# Chat - Building an Adapter
+
+<p class="description">A step-by-step tutorial for implementing a custom ChatAdapter that connects your chat UI to any backend.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+This page walks you through building a `ChatAdapter` from scratch.
+You will start with the minimum viable adapter (`sendMessage` only), then progressively add history loading and conversation management.
+
+## Step 1: The minimal adapter
+
+The only required method is `sendMessage`.
+It receives the user's message and must return a `ReadableStream` of typed chunks.
+
+```tsx
+import { ChatBox } from '@mui/x-chat';
+import type { ChatAdapter } from '@mui/x-chat/headless';
+
+const adapter: ChatAdapter = {
+ async sendMessage({ message, signal }) {
+ const res = await fetch('/api/chat', {
+ method: 'POST',
+ body: JSON.stringify({ message }),
+ signal,
+ });
+ return res.body!; // ReadableStream
+ },
+};
+
+export default function App() {
+  return <ChatBox adapter={adapter} />;
+}
+```
+
+The runtime handles streaming, message normalization, error surfacing, and state updates.
+Your adapter only needs to know how to talk to your backend.
+
+## Step 2: Implement streaming from scratch
+
+If your backend does not return a `ReadableStream` natively (for example, you are using a WebSocket or a custom protocol), you can construct the stream manually.
+
+The stream must begin with a `start` chunk and end with `finish` or `abort`.
+Text arrives in `text-start` / `text-delta` / `text-end` triplets:
+
+```tsx
+const adapter: ChatAdapter = {
+ async sendMessage({ message }) {
+ return new ReadableStream({
+ start(controller) {
+ controller.enqueue({ type: 'start', messageId: 'msg-1' });
+ controller.enqueue({ type: 'text-start', id: 'text-1' });
+ controller.enqueue({ type: 'text-delta', id: 'text-1', delta: 'Hello!' });
+ controller.enqueue({ type: 'text-end', id: 'text-1' });
+ controller.enqueue({ type: 'finish', messageId: 'msg-1' });
+ controller.close();
+ },
+ });
+ },
+};
+```
+
+For a real integration, you would typically read from your backend inside the `start` callback and enqueue chunks as they arrive:
+
+```tsx
+const adapter: ChatAdapter = {
+ async sendMessage({ message, signal }) {
+ return new ReadableStream({
+ async start(controller) {
+ const response = await fetch('/api/chat', {
+ method: 'POST',
+ body: JSON.stringify({
+ text: message.parts[0]?.type === 'text' ? message.parts[0].text : '',
+ }),
+ signal,
+ });
+
+ const reader = response.body!.getReader();
+ const decoder = new TextDecoder();
+ const messageId = `msg-${Date.now()}`;
+
+ controller.enqueue({ type: 'start', messageId });
+ controller.enqueue({ type: 'text-start', id: 'text-1' });
+
+ try {
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) break;
+ const text = decoder.decode(value, { stream: true });
+ controller.enqueue({ type: 'text-delta', id: 'text-1', delta: text });
+ }
+ controller.enqueue({ type: 'text-end', id: 'text-1' });
+ controller.enqueue({ type: 'finish', messageId });
+ } catch (error) {
+ controller.enqueue({ type: 'text-end', id: 'text-1' });
+ controller.enqueue({ type: 'abort', messageId });
+ } finally {
+ controller.close();
+ }
+ },
+ });
+ },
+};
+```
+
+## Step 3: Wire up the abort signal
+
+`input.signal` is an `AbortSignal` that fires when the user clicks the stop button.
+Pass it to your `fetch` call so the HTTP request is cancelled automatically:
+
+```tsx
+async sendMessage({ message, signal }) {
+ const res = await fetch('/api/chat', {
+ method: 'POST',
+ body: JSON.stringify({ message }),
+ signal, // browser cancels the request when the user stops
+ });
+ return res.body!;
+},
+```
+
+If your backend requires explicit cancellation (for example, sending a separate cancel request), implement the optional `stop()` method alongside the signal:
+
+```tsx
+stop() {
+ fetch('/api/chat/cancel', { method: 'POST' });
+},
+```
+
+## Step 4: Add message history with `listMessages`
+
+Implement `listMessages` to load history when the user opens a conversation.
+The runtime calls it whenever `activeConversationId` changes to a conversation that has no messages in the store yet.
+
+```tsx
+const adapter: ChatAdapter = {
+ async sendMessage(input) {
+ /* ... from Step 1 ... */
+ },
+
+ async listMessages({ conversationId, cursor }) {
+ const params = new URLSearchParams({ cursor: cursor ?? '' });
+ const res = await fetch(
+ `/api/conversations/${conversationId}/messages?${params}`,
+ );
+ const { messages, nextCursor, hasMore } = await res.json();
+ return { messages, cursor: nextCursor, hasMore };
+ },
+};
+```
+
+When `hasMore` is `true`, `ChatBox` shows a "Load earlier messages" control that calls `listMessages` again with the previous cursor.
+
+## Step 5: Add conversation listing with `listConversations`
+
+Implement `listConversations` to populate the conversation sidebar when `ChatBox` mounts.
+The runtime calls it once on startup, before any user interaction.
+
+```tsx
+const adapter: ChatAdapter = {
+ async sendMessage(input) {
+ /* ... */
+ },
+
+ async listMessages(input) {
+ /* ... from Step 4 ... */
+ },
+
+ async listConversations() {
+ const res = await fetch('/api/conversations');
+ const { conversations } = await res.json();
+ return { conversations };
+ },
+};
+```
+
+For paginated conversation lists, return a `cursor` and `hasMore`:
+
+```tsx
+async listConversations({ cursor }) {
+ const params = new URLSearchParams();
+ if (cursor) params.set('cursor', cursor);
+ const res = await fetch(`/api/conversations?${params}`);
+ const { conversations, nextCursor, hasMore } = await res.json();
+ return { conversations, cursor: nextCursor, hasMore };
+},
+```
+
+## Step 6: Handle stream reconnection
+
+Implement `reconnectToStream` to resume an interrupted stream — for example, when an SSE connection drops mid-response.
+The runtime calls it automatically after detecting a disconnected stream.
+
+```tsx
+async reconnectToStream({ conversationId, messageId, signal }) {
+ const params = new URLSearchParams();
+ if (conversationId) params.set('conversationId', conversationId);
+ if (messageId) params.set('messageId', messageId);
+ const res = await fetch('/api/chat/reconnect', {
+ method: 'POST',
+ body: params.toString(),
+ signal,
+ });
+ if (res.status === 404) return null; // message no longer resumable
+ return res.body!;
+},
+```
+
+Return `null` if the interrupted message cannot be resumed.
+
+## Complete adapter
+
+Here is a complete adapter that combines all the steps:
+
+```tsx
+import type { ChatAdapter } from '@mui/x-chat/headless';
+
+const adapter: ChatAdapter = {
+ async sendMessage({ message, signal }) {
+ const res = await fetch('/api/chat', {
+ method: 'POST',
+ body: JSON.stringify({ message }),
+ signal,
+ });
+ return res.body!;
+ },
+
+ async listMessages({ conversationId, cursor }) {
+ const params = new URLSearchParams({ cursor: cursor ?? '' });
+ const res = await fetch(
+ `/api/conversations/${conversationId}/messages?${params}`,
+ );
+ const { messages, nextCursor, hasMore } = await res.json();
+ return { messages, cursor: nextCursor, hasMore };
+ },
+
+ async listConversations() {
+ const res = await fetch('/api/conversations');
+ const { conversations } = await res.json();
+ return { conversations };
+ },
+
+ async reconnectToStream({ conversationId, messageId, signal }) {
+ const res = await fetch('/api/chat/reconnect', {
+ method: 'POST',
+ body: JSON.stringify({ conversationId, messageId }),
+ signal,
+ });
+ if (res.status === 404) return null;
+ return res.body!;
+ },
+};
+```
+
+## Error handling
+
+:::info
+You do not need to catch errors inside adapter methods — the runtime handles them for you.
+:::
+
+When an adapter method throws, the runtime records a `ChatError`, surfaces it through the built-in error UI and the `onError` callback, and marks the error `recoverable` or `retryable` when applicable.
+
+To handle errors at the application level:
+
+```tsx
+<ChatBox
+  adapter={adapter}
+  onError={(error) => {
+    console.error('[Chat error]', error.source, error.message);
+  }}
+/>
+```
+
+## See also
+
+- [Adapters](/x/react-chat/backend/adapters/) for the full interface reference.
+- [Streaming](/x/react-chat/behavior/streaming/) for the full stream chunk protocol reference.
+- [Real-Time Adapters](/x/react-chat/backend/real-time-adapters/) for adding `subscribe()`, `setTyping()`, and `markRead()`.
+
+## API
diff --git a/docs/data/chat/backend/controlled-state/controlled-state.md b/docs/data/chat/backend/controlled-state/controlled-state.md
new file mode 100644
index 0000000000000..8b07d2969dc81
--- /dev/null
+++ b/docs/data/chat/backend/controlled-state/controlled-state.md
@@ -0,0 +1,183 @@
+---
+productId: x-chat
+title: Controlled State
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+components: ChatBox
+---
+
+# Chat - Controlled State
+
+<p class="description">Own messages, conversations, active conversation, and composer value externally using the controlled/uncontrolled pattern on ChatProvider.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+Every chat component reads its data — messages, conversations, composer state, streaming status — from a shared store provided by `ChatProvider`.
+`ChatBox` renders a `ChatProvider` internally, so in the simplest case you never interact with the provider directly.
+
+When you need more control — sharing state with components outside `ChatBox`, controlling the message list externally, or mounting multiple independent chat instances — you work with `ChatProvider` explicitly.
+
+## Controlled and uncontrolled state
+
+`ChatProvider` supports both controlled and uncontrolled patterns for every piece of state.
+In uncontrolled mode, state lives entirely inside the store.
+In controlled mode, you own the state and the provider synchronizes it.
+
+### Messages
+
+```tsx
+{/* Uncontrolled — internal store owns the messages */}
+<ChatProvider adapter={adapter} initialMessages={initialMessages} />
+
+{/* Controlled — you own the messages array */}
+<ChatProvider adapter={adapter} messages={messages} onMessagesChange={setMessages} />
+```
+
+### Conversations
+
+```tsx
+{/* Uncontrolled */}
+<ChatProvider adapter={adapter} initialConversations={initialConversations} />
+
+{/* Controlled */}
+<ChatProvider
+  adapter={adapter}
+  conversations={conversations}
+  onConversationsChange={setConversations}
+/>
+```
+
+### Composer value
+
+```tsx
+{/* Uncontrolled */}
+<ChatProvider adapter={adapter} initialComposerValue="" />
+
+{/* Controlled */}
+<ChatProvider adapter={adapter} composerValue={value} onComposerValueChange={setValue} />
+```
+
+### Mixed mode
+
+You can mix controlled and uncontrolled state freely.
+For example, control the active conversation while letting messages be managed internally:
+
+```tsx
+<ChatProvider
+  adapter={adapter}
+  activeConversationId={activeConversationId}
+  onActiveConversationChange={setActiveConversationId}
+/>
+```
+
+:::info
+Start with `initial*` (uncontrolled) props during prototyping, then switch to controlled props when you need to sync with external state. You can switch modes at any time without changing the runtime model.
+:::
+
+## State model reference
+
+| Model | Controlled prop | Initial prop | Change callback |
+| :------------------ | :--------------------- | :---------------------------- | :--------------------------- |
+| Messages | `messages` | `initialMessages` | `onMessagesChange` |
+| Conversations | `conversations` | `initialConversations` | `onConversationsChange` |
+| Active conversation | `activeConversationId` | `initialActiveConversationId` | `onActiveConversationChange` |
+| Composer value | `composerValue` | `initialComposerValue` | `onComposerValueChange` |
+
+## Sharing context across the app
+
+Place `ChatProvider` higher in the tree to share chat state with components that live outside the chat surface:
+
+```tsx
+function App() {
+ return (
+    <ChatProvider adapter={adapter}>
+      <StatusBadge /> {/* can use hooks like useChatStatus */}
+      <Sidebar /> {/* can use useConversations */}
+      <ChatSurface /> {/* renders ChatBox or custom layout */}
+    </ChatProvider>
+ );
+}
+```
+
+:::warning
+`ChatBox` always creates its own internal `ChatProvider`. If you need to share state with external components, wrap them in a single `ChatProvider` and use the individual themed components (`ChatMessageList`, `ChatComposer`, etc.) instead of `ChatBox`.
+:::
+
+## Syncing with a global state manager
+
+Use the controlled props with change callbacks to keep the chat store synchronized with your global state manager:
+
+```tsx
+function App() {
+ const dispatch = useAppDispatch();
+ const messages = useAppSelector(selectChatMessages);
+ const activeId = useAppSelector(selectActiveConversationId);
+
+ return (
+    <ChatProvider
+      adapter={adapter}
+      messages={messages}
+      onMessagesChange={(msgs) => dispatch(setChatMessages(msgs))}
+      activeConversationId={activeId}
+      onActiveConversationChange={(id) => dispatch(setActiveConversation(id))}
+    >
+      <ChatSurface />
+    </ChatProvider>
+ );
+}
+```
+
+The runtime still streams, normalizes, and derives selectors — you just own the source of truth.
+
+## Multiple independent instances
+
+Each `ChatProvider` creates an isolated store.
+To render multiple independent chat surfaces, use separate `ChatBox` instances — each one creates its own provider internally.
+
+## Provider props reference
+
+| Prop | Type | Description |
+| :---------------------------- | :---------------------------------------------- | :------------------------------------------------------ |
+| `adapter` | `ChatAdapter` | **Required.** The backend adapter |
+| `messages` | `ChatMessage[]` | Controlled messages |
+| `initialMessages` | `ChatMessage[]` | Initial messages (uncontrolled) |
+| `onMessagesChange` | `(messages) => void` | Called when messages change |
+| `conversations` | `ChatConversation[]` | Controlled conversations |
+| `initialConversations` | `ChatConversation[]` | Initial conversations (uncontrolled) |
+| `onConversationsChange` | `(conversations) => void` | Called when conversations change |
+| `activeConversationId` | `string` | Controlled active conversation |
+| `initialActiveConversationId` | `string` | Initial active conversation (uncontrolled) |
+| `onActiveConversationChange` | `(conversationId: string \| undefined) => void` | Called when active conversation changes |
+| `composerValue` | `string` | Controlled composer text |
+| `initialComposerValue` | `string` | Initial composer text (uncontrolled) |
+| `onComposerValueChange` | `(value) => void` | Called when composer value changes |
+| `members` | `ChatUser[]` | Participant metadata (avatars, names, roles) |
+| `currentUser` | `ChatUser` | The current user object |
+| `onToolCall` | `ChatOnToolCall` | Handler for tool call messages |
+| `onFinish` | `ChatOnFinish` | Called when a response finishes streaming |
+| `onData` | `ChatOnData` | Called for each streaming chunk |
+| `onError` | `(error) => void` | Called on adapter errors |
+| `streamFlushInterval` | `number` | Batching interval for streaming deltas (default: 16 ms) |
+| `storeClass` | `ChatStoreConstructor` | Custom store class (default: ChatStore) |
+| `partRenderers` | `ChatPartRendererMap` | Custom message part renderers |
+
+## See also
+
+- [Adapters](/x/react-chat/backend/adapters/) for the backend adapter interface.
+- [Hooks Reference](/x/react-chat/resources/hooks/) for reading state from the store.
+- [Events & Callbacks](/x/react-chat/resources/events-and-callbacks/) for `onFinish`, `onToolCall`, `onData`, and `onError`.
diff --git a/docs/data/chat/backend/real-time-adapters/real-time-adapters.md b/docs/data/chat/backend/real-time-adapters/real-time-adapters.md
new file mode 100644
index 0000000000000..b2023f974cfb7
--- /dev/null
+++ b/docs/data/chat/backend/real-time-adapters/real-time-adapters.md
@@ -0,0 +1,252 @@
+---
+productId: x-chat
+title: Real-Time Adapters
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+---
+
+# Chat - Real-Time Adapters
+
+<p class="description">Push typing indicators, presence updates, read receipts, and collection changes into the runtime through the adapter's subscribe() method.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+The adapter's `subscribe()` method enables push-based updates from the backend.
+The runtime calls it on mount and cleans it up on unmount, keeping the subscription lifecycle fully managed.
+
+## Subscription lifecycle
+
+When `ChatProvider` mounts and the adapter implements `subscribe()`, the runtime:
+
+1. Calls `subscribe({ onEvent })` with a callback.
+2. Stores the returned cleanup function.
+3. On unmount, calls the cleanup function to close the connection.
+
+```ts
+const adapter: ChatAdapter = {
+ async sendMessage(input) {
+ /* ... */
+ },
+
+ subscribe({ onEvent }) {
+ const ws = new WebSocket('/api/realtime');
+ ws.onmessage = (event) => onEvent(JSON.parse(event.data));
+ return () => ws.close();
+ },
+};
+```
+
+The cleanup function can be returned directly or from a resolved promise, supporting both synchronous and asynchronous setup:
+
+```tsx
+async subscribe({ onEvent }) {
+ const sub = await myClient.subscribe((event) => onEvent(event));
+ return () => sub.unsubscribe();
+},
+```
+
+## Event types
+
+The `onEvent` callback receives `ChatRealtimeEvent` objects.
+There are nine event variants organized into five categories.
+
+### Conversation events
+
+| Event type | Payload | Store effect |
+| :--------------------- | :------------------- | :---------------------------------------------------------- |
+| `conversation-added` | `{ conversation }` | Adds the conversation to the store |
+| `conversation-updated` | `{ conversation }` | Replaces the conversation record |
+| `conversation-removed` | `{ conversationId }` | Removes the conversation and resets active ID if it matched |
+
+### Message events
+
+| Event type | Payload | Store effect |
+| :---------------- | :------------------------------- | :--------------------------------- |
+| `message-added` | `{ message }` | Adds the message to the store |
+| `message-updated` | `{ message }` | Replaces the message record |
+| `message-removed` | `{ messageId, conversationId? }` | Removes the message from the store |
+
+### Typing events
+
+| Event type | Payload | Store effect |
+| :--------- | :------------------------------------- | :------------------------------------------ |
+| `typing` | `{ conversationId, userId, isTyping }` | Updates the typing map for the conversation |
+
+### Presence events
+
+| Event type | Payload | Store effect |
+| :--------- | :--------------------- | :------------------------------------------------------- |
+| `presence` | `{ userId, isOnline }` | Updates `isOnline` on matching conversation participants |
+
+### Read events
+
+| Event type | Payload | Store effect |
+| :--------- | :---------------------------------------- | :------------------------------------ |
+| `read` | `{ conversationId, messageId?, userId? }` | Updates the conversation's read state |
+
+## Dispatching events from the backend
+
+Each event is a plain object with a `type` field.
+Here are the full shapes:
+
+```ts
+// Conversation events
+{ type: 'conversation-added', conversation: ChatConversation }
+{ type: 'conversation-updated', conversation: ChatConversation }
+{ type: 'conversation-removed', conversationId: string }
+
+// Message events
+{ type: 'message-added', message: ChatMessage }
+{ type: 'message-updated', message: ChatMessage }
+{ type: 'message-removed', messageId: string, conversationId?: string }
+
+// Typing
+{ type: 'typing', conversationId: string, userId: string, isTyping: boolean }
+
+// Presence
+{ type: 'presence', userId: string, isOnline: boolean }
+
+// Read
+{ type: 'read', conversationId: string, messageId?: string, userId?: string }
+```
+
+## The `setTyping()` method
+
+Implement `setTyping()` to send a typing indicator to your backend when the user is composing a message.
+The runtime calls it when the composer value changes from empty to non-empty (and vice versa).
+
+```ts
+interface ChatSetTypingInput {
+ conversationId: string;
+ isTyping: boolean;
+}
+```
+
+```tsx
+async setTyping({ conversationId, isTyping }) {
+ await fetch('/api/typing', {
+ method: 'POST',
+ body: JSON.stringify({ conversationId, isTyping }),
+ });
+},
+```
+
+To receive typing indicators from other users in the UI, implement `subscribe()` and emit `typing` events through the `onEvent` callback.
+
+## The `markRead()` method
+
+Implement `markRead()` to signal to your backend that the user has seen a conversation or a specific message.
+The runtime does not call this automatically — call `adapter.markRead()` directly from your own UI event handler.
+
+```ts
+interface ChatMarkReadInput {
+ conversationId: string;
+ messageId?: string; // mark all messages up to this one as read
+}
+```
+
+## Stream reconnection
+
+Implement `reconnectToStream()` to resume an interrupted stream — for example, when an SSE connection drops mid-response.
+The runtime calls it automatically after detecting a disconnected stream.
+
+```ts
+interface ChatReconnectToStreamInput {
+ conversationId?: string;
+ messageId?: string; // the message being streamed when the disconnect happened
+ signal: AbortSignal;
+}
+```
+
+Return `null` if the interrupted message cannot be resumed:
+
+```tsx
+async reconnectToStream({ conversationId, messageId, signal }) {
+ const params = new URLSearchParams();
+ if (conversationId) params.set('conversationId', conversationId);
+ if (messageId) params.set('messageId', messageId);
+ const res = await fetch('/api/chat/reconnect', {
+ method: 'POST',
+ body: params.toString(),
+ signal,
+ });
+ if (res.status === 404) return null; // message no longer resumable
+ return res.body!;
+},
+```
+
+## Consuming realtime state
+
+### Typing indicators
+
+Use `useChatStatus()` to get the list of users currently typing:
+
+```tsx
+function TypingIndicator() {
+ const { typingUserIds } = useChatStatus();
+
+ if (typingUserIds.length === 0) return null;
+
+  return <span>{typingUserIds.length} user(s) typing...</span>;
+}
+```
+
+The `typingUserIds` selector returns user IDs for the active conversation by default.
+For a specific conversation, use `chatSelectors.typingUserIds` with a conversation ID argument.
+
+### Presence
+
+Presence events update the `isOnline` field on `ChatUser` objects inside conversation participants.
+Use `useConversation(id)` or `useConversations()` to see participant presence.
+
+### Read state
+
+Read events update the `readState` and `unreadCount` fields on `ChatConversation`.
+Use `useConversation(id)` to reflect read status in the UI.
+
+## WebSocket integration example
+
+A complete adapter with WebSocket-based real-time events:
+
+```tsx
+import type { ChatAdapter } from '@mui/x-chat/headless';
+
+const adapter: ChatAdapter = {
+ async sendMessage({ message, signal }) {
+ const res = await fetch('/api/chat', {
+ method: 'POST',
+ body: JSON.stringify({ message }),
+ signal,
+ });
+ return res.body!;
+ },
+
+ subscribe({ onEvent }) {
+ const ws = new WebSocket('/api/ws');
+ ws.onmessage = (e) => onEvent(JSON.parse(e.data));
+ return () => ws.close(); // cleanup on unmount
+ },
+
+ async setTyping({ conversationId, isTyping }) {
+ await fetch('/api/typing', {
+ method: 'POST',
+ body: JSON.stringify({ conversationId, isTyping }),
+ });
+ },
+
+ async markRead({ conversationId, messageId }) {
+ await fetch('/api/read', {
+ method: 'POST',
+ body: JSON.stringify({ conversationId, messageId }),
+ });
+ },
+};
+```
+
+## See also
+
+- [Adapters](/x/react-chat/backend/adapters/) for the full adapter interface including `subscribe()`.
+- [Hooks Reference](/x/react-chat/resources/hooks/) for `useChatStatus()` and the typing/presence consumption pattern.
+- [Selectors Reference](/x/react-chat/resources/selectors/) for `chatSelectors.typingUserIds` and other store selectors.
+
+## API
diff --git a/docs/data/chat/basics/chatbox/chatbox.md b/docs/data/chat/basics/chatbox/chatbox.md
new file mode 100644
index 0000000000000..6ec9a8dd9ea4b
--- /dev/null
+++ b/docs/data/chat/basics/chatbox/chatbox.md
@@ -0,0 +1,137 @@
+---
+productId: x-chat
+title: ChatBox
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+components: ChatBox
+---
+
+# Chat - ChatBox
+
+<p class="description">The all-in-one component that renders a complete chat surface with a single import.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+## Overview
+
+`ChatBox` is the fastest way to add a chat interface to your application.
+It creates a `ChatProvider` internally and composes every themed subcomponent — conversation list, thread header, message list, and composer — into a ready-to-use surface:
+
+```tsx
+import { ChatBox } from '@mui/x-chat';
+
+<ChatBox />;
+```
+
+All visual styles are derived from your active Material UI theme.
+No additional configuration is needed — `ChatBox` reads `palette`, `typography`, `shape`, and `spacing` from the closest `ThemeProvider`.
+
+{{"demo": "../../material/examples/basic-ai-chat/BasicAiChat.js", "bg": "inline", "defaultCodeOpen": false}}
+
+## ChatBox vs. ChatProvider
+
+### ChatBox (all-in-one)
+
+`ChatBox` is the right choice when you want a complete chat surface with minimal setup.
+It creates a `ChatProvider` internally, so all hooks work inside any component rendered as a child or descendant of `ChatBox`:
+
+{{"demo": "../../material/context/ChatBoxWithHooks.js", "defaultCodeOpen": false, "bg": "inline"}}
+
+### ChatProvider (custom layout)
+
+When you need full control over the layout — for example, placing the conversation list in a sidebar and the thread in a main content area — use `ChatProvider` directly and compose the pieces yourself:
+
+{{"demo": "../../material/context/ChatProviderCustomLayout.js", "defaultCodeOpen": false, "bg": "inline"}}
+
+:::warning
+`ChatBox` always creates its own internal `ChatProvider`. If you need to share state with external components, wrap them in a single `ChatProvider` and use the individual themed components (`ChatMessageList`, `ChatComposer`, etc.) instead of `ChatBox`.
+:::
+
+## Feature flags
+
+`ChatBox` accepts a `features` prop that toggles built-in capabilities on or off:
+
+```tsx
+<ChatBox features={{ /* toggle built-in capabilities on or off */ }} />;
+```
+
+Feature flags let you progressively enable functionality without replacing slots or restructuring the component tree.
+
+## Controlled and uncontrolled state
+
+`ChatBox` supports both controlled and uncontrolled patterns for every piece of state.
+These props are forwarded to the internal `ChatProvider`.
+
+### Messages
+
+```tsx
+{
+ /* Uncontrolled — internal store owns the messages */
+}
+<ChatBox defaultMessages={initialMessages} />;
+
+{
+ /* Controlled — you own the messages array */
+}
+<ChatBox messages={messages} onMessagesChange={setMessages} />;
+```
+
+### Conversations
+
+```tsx
+{
+ /* Uncontrolled */
+}
+<ChatBox defaultConversations={initialConversations} />;
+
+{
+ /* Controlled */
+}
+;
+```
+
+### Composer value
+
+```tsx
+{
+ /* Uncontrolled */
+}
+<ChatBox defaultComposerValue="" />;
+
+{
+ /* Controlled */
+}
+;
+```
+
+You can mix controlled and uncontrolled state freely.
+For example, control the active conversation while letting messages be managed internally.
+
+## Multiple independent instances
+
+Each `ChatBox` creates its own isolated store.
+To render multiple independent chat surfaces side by side, use separate `ChatBox` instances:
+
+{{"demo": "../../material/context/MultipleInstances.js", "defaultCodeOpen": false, "bg": "inline"}}
diff --git a/docs/data/chat/basics/composer/composer.md b/docs/data/chat/basics/composer/composer.md
new file mode 100644
index 0000000000000..eb102bd663226
--- /dev/null
+++ b/docs/data/chat/basics/composer/composer.md
@@ -0,0 +1,161 @@
+---
+productId: x-chat
+title: Composer
+packageName: '@mui/x-chat'
+githubLabel: 'scope: chat'
+components: ChatComposerTextArea, ChatComposerSendButton, ChatComposerAttachmentList, ChatComposerHelperText, ChatComposerLabel, ChatComposerToolbar
+---
+
+# Chat - Composer
+
+<p class="description">The text input area where users draft and send messages, with auto-resize, keyboard shortcuts, and a send button.</p>
+
+{{"component": "@mui/internal-core-docs/ComponentLinkHeader"}}
+
+## Overview
+
+The composer is the input region at the bottom of the chat surface.
+`ChatComposer` provides Material UI styling — border, padding, and theme tokens are applied automatically.
+
+## Import
+
+```tsx
+import {
+ ChatComposer,
+ ChatComposerLabel,
+ ChatComposerTextArea,
+ ChatComposerSendButton,
+ ChatComposerAttachButton,
+ ChatComposerToolbar,
+ ChatComposerHelperText,
+} from '@mui/x-chat';
+```
+
+:::info
+When using `ChatBox`, the composer is already included.
+Import these components directly only when building a custom layout.
+:::
+
+## Component anatomy
+
+Inside `ChatBox`, the composer renders the following structure:
+
+```text
+ChatComposer ←