Skip to content

Commit

Permalink
feat (ui): generate and forward message ids for response messages (#4415)
Browse files Browse the repository at this point in the history
  • Loading branch information
lgrammel authored Jan 16, 2025
1 parent 9ad9bf4 commit 00114c5
Show file tree
Hide file tree
Showing 36 changed files with 1,064 additions and 359 deletions.
6 changes: 6 additions & 0 deletions .changeset/hungry-buses-hear.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
'@ai-sdk/provider-utils': patch
'ai': patch
---

feat: expose IDGenerator and createIdGenerator
6 changes: 6 additions & 0 deletions .changeset/itchy-cobras-deliver.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
'@ai-sdk/ui-utils': patch
'ai': patch
---

feat (ui): generate and forward message ids for response messages
43 changes: 43 additions & 0 deletions content/docs/04-ai-sdk-ui/03-chatbot-message-persistence.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -181,3 +181,46 @@ export async function saveChat({

// ... rest of the file
```

## Message IDs

In addition to a chat ID, each message has an ID.
You can use this message ID, for example, to manipulate individual messages.

The IDs for user messages are generated by the `useChat` hook on the client,
and the IDs for AI response messages are generated by `streamText`.

You can control the ID format by providing ID generators:

```tsx filename="ui/chat.tsx" highlight="8-12"
import { createIdGenerator } from 'ai';
import { useChat } from 'ai/react';

const {
// ...
} = useChat({
// ...
// id format for client-side messages:
generateId: createIdGenerator({
prefix: 'msgc',
size: 16,
}),
});
```

```tsx filename="app/api/chat/route.ts" highlight="7-11"
import { createIdGenerator, streamText } from 'ai';

export async function POST(req: Request) {
// ...
const result = streamText({
// ...
// id format for server-side messages:
experimental_generateMessageId: createIdGenerator({
prefix: 'msgs',
size: 16,
}),
});
// ...
}
```
12 changes: 12 additions & 0 deletions content/docs/04-ai-sdk-ui/50-stream-protocol.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,18 @@ Example: `a:{"toolCallId":"call-123","result":"tool output"}\n`
height={1148}
/>

### Start Step Part

A part indicating the start of a step.

It includes the following metadata:

- an `id` that indicates the ID of the message that this step belongs to.

Format: `f:{id:string}\n`

### Finish Step Part

A part indicating that a step (i.e., one LLM API call in the backend) has been completed.
Expand Down
11 changes: 9 additions & 2 deletions content/docs/07-reference/01-ai-sdk-core/01-generate-text.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -406,6 +406,13 @@ To see `generateText` in action, check out [these examples](#examples).
description:
'Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. A maximum number is required to prevent infinite loops in the case of misconfigured tools. By default, it is set to 1.',
},
{
name: 'experimental_generateMessageId',
type: '() => string',
isOptional: true,
description:
'Function used to generate a unique ID for each message. This is an experimental feature.',
},
{
name: 'experimental_continueSteps',
type: 'boolean',
Expand Down Expand Up @@ -788,7 +795,7 @@ To see `generateText` in action, check out [these examples](#examples).
},
{
name: 'messages',
type: 'Array<CoreAssistantMessage | CoreToolMessage>',
type: 'Array<ResponseMessage>',
description:
'The response messages that were generated during the call. It consists of an assistant message, potentially containing tool calls. When there are tool results, there is an additional tool message with the tool results that are available. If there are tools that do not have execute functions, they are not included in the tool results and need to be added separately.',
},
Expand Down Expand Up @@ -932,7 +939,7 @@ To see `generateText` in action, check out [these examples](#examples).
},
{
name: 'messages',
type: 'Array<CoreAssistantMessage | CoreToolMessage>',
type: 'Array<ResponseMessage>',
description:
'The response messages that were generated during the call. It consists of an assistant message, potentially containing tool calls. When there are tool results, there is an additional tool message with the tool results that are available. If there are tools that do not have execute functions, they are not included in the tool results and need to be added separately.',
},
Expand Down
170 changes: 116 additions & 54 deletions content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -408,6 +408,13 @@ To see `streamText` in action, check out [these examples](#examples).
description:
'Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. A maximum number is required to prevent infinite loops in the case of misconfigured tools. By default, it is set to 1.',
},
{
name: 'experimental_generateMessageId',
type: '() => string',
isOptional: true,
description:
'Function used to generate a unique ID for each message. This is an experimental feature.',
},
{
name: 'experimental_continueSteps',
type: 'boolean',
Expand Down Expand Up @@ -978,7 +985,7 @@ To see `streamText` in action, check out [these examples](#examples).
},
{
name: 'messages',
type: 'Array<CoreAssistantMessage | CoreToolMessage>',
type: 'Array<ResponseMessage>',
description:
'The response messages that were generated during the call. It consists of an assistant message, potentially containing tool calls. When there are tool results, there is an additional tool message with the tool results that are available. If there are tools that do not have execute functions, they are not included in the tool results and need to be added separately.',
},
Expand Down Expand Up @@ -1115,7 +1122,7 @@ To see `streamText` in action, check out [these examples](#examples).
},
{
name: 'messages',
type: 'Array<CoreAssistantMessage | CoreToolMessage>',
type: 'Array<ResponseMessage>',
description:
'The response messages that were generated during the call. It consists of an assistant message, potentially containing tool calls. When there are tool results, there is an additional tool message with the tool results that are available. If there are tools that do not have execute functions, they are not included in the tool results and need to be added separately.',
},
Expand Down Expand Up @@ -1247,7 +1254,7 @@ To see `streamText` in action, check out [these examples](#examples).
},
{
name: 'messages',
type: 'Array<CoreAssistantMessage | CoreToolMessage>',
type: 'Array<ResponseMessage>',
description:
'The response messages that were generated during the call. It consists of an assistant message, potentially containing tool calls. When there are tool results, there is an additional tool message with the tool results that are available. If there are tools that do not have execute functions, they are not included in the tool results and need to be added separately.',
},
Expand Down Expand Up @@ -1420,14 +1427,37 @@ To see `streamText` in action, check out [these examples](#examples).
parameters: [
{
name: 'type',
type: "'error'",
description: 'The type to identify the object as error.',
type: "'step-start'",
description: 'Indicates the start of a new step in the stream.',
},
{
name: 'error',
type: 'Error',
name: 'messageId',
type: 'string',
description: 'The ID of the assistant message that started the step.',
},
{
name: 'request',
type: 'RequestMetadata',
description:
'Describes the error that may have occurred during execution.',
'Information about the request that was sent to the language model provider.',
properties: [
{
type: 'RequestMetadata',
parameters: [
{
name: 'body',
type: 'string',
description:
'Raw request HTTP body that was sent to the provider API as a string.',
},
],
},
],
},
{
name: 'warnings',
type: 'Warning[]',
description: 'Warnings from the model provider (e.g. unsupported settings).',
},
],
},
Expand All @@ -1437,46 +1467,24 @@ To see `streamText` in action, check out [these examples](#examples).
{
name: 'type',
type: "'step-finish'",
description: 'The type to identify the object as step finish.',
description: 'Indicates the end of the current step in the stream.',
},
{
name: 'finishReason',
type: "'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other' | 'unknown'",
description: 'The reason the model finished generating the text.',
name: 'messageId',
type: 'string',
description: 'The ID of the assistant message that ended the step.',
},
{
name: 'usage',
type: 'TokenUsage',
description: 'The token usage of the generated text.',
properties: [
{
type: 'TokenUsage',
parameters: [
{
name: 'promptTokens',
type: 'number',
description: 'The total number of tokens in the prompt.',
},
{
name: 'completionTokens',
type: 'number',
description:
'The total number of tokens in the completion.',
},
{
name: 'totalTokens',
type: 'number',
description: 'The total number of tokens generated.',
},
],
},
],
name: 'logprobs',
type: 'LogProbs',
isOptional: true,
description:
'Optional log probabilities for tokens returned by some providers.',
},
{
name: 'request',
type: 'RequestMetadata',
isOptional: true,
description: 'Request metadata.',
description: 'Information about the request that was sent to the language model provider.',
properties: [
{
type: 'RequestMetadata',
Expand All @@ -1491,11 +1499,16 @@ To see `streamText` in action, check out [these examples](#examples).
},
],
},
{
name: 'warnings',
type: 'Warning[]',
isOptional: true,
description: 'Warnings from the model provider (e.g. unsupported settings).',
},
{
name: 'response',
type: 'ResponseMetadata',
isOptional: true,
description: 'Response metadata.',
description: 'Response metadata from the language model provider.',
properties: [
{
type: 'ResponseMetadata',
Expand Down Expand Up @@ -1528,10 +1541,37 @@ To see `streamText` in action, check out [these examples](#examples).
],
},
{
name: 'warnings',
type: 'Warning[] | undefined',
description:
'Warnings from the model provider (e.g. unsupported settings).',
name: 'usage',
type: 'TokenUsage',
description: 'The token usage of the generated text.',
properties: [
{
type: 'TokenUsage',
parameters: [
{
name: 'promptTokens',
type: 'number',
description: 'The total number of tokens in the prompt.',
},
{
name: 'completionTokens',
type: 'number',
description:
'The total number of tokens in the completion.',
},
{
name: 'totalTokens',
type: 'number',
description: 'The total number of tokens generated.',
},
],
},
],
},
{
name: 'finishReason',
type: "'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other' | 'unknown'",
description: 'The reason the model finished generating the text.',
},
{
name: 'experimental_providerMetadata',
Expand Down Expand Up @@ -1590,13 +1630,26 @@ To see `streamText` in action, check out [these examples](#examples).
],
},
{
name: 'response',
type: 'Response',
name: 'experimental_providerMetadata',
type: 'Record<string,Record<string,JSONValue>> | undefined',
isOptional: true,
description: 'Response metadata.',
description:
'Optional metadata from the provider. The outer key is the provider name. The inner values are the metadata. Details depend on the provider.',
},
{
name: 'logprobs',
type: 'LogProbs',
isOptional: true,
description:
'Optional log probabilities for tokens returned by some providers.',
},
{
name: 'response',
type: 'ResponseMetadata',
description: 'Response metadata from the language model provider.',
properties: [
{
type: 'Response',
type: 'ResponseMetadata',
parameters: [
{
name: 'id',
Expand Down Expand Up @@ -1625,12 +1678,21 @@ To see `streamText` in action, check out [these examples](#examples).
},
],
},
],
},
{
type: 'TextStreamPart',
parameters: [
{
name: 'experimental_providerMetadata',
type: 'Record<string,Record<string,JSONValue>> | undefined',
isOptional: true,
name: 'type',
type: "'error'",
description: 'The type to identify the object as error.',
},
{
name: 'error',
type: 'unknown',
description:
'Optional metadata from the provider. The outer key is the provider name. The inner values are the metadata. Details depend on the provider.',
'Describes the error that may have occurred during execution.',
},
],
},
Expand Down
Loading

0 comments on commit 00114c5

Please sign in to comment.