Skip to content

Commit

Permalink
feat(wenxin-adapter): support tool call and optimize message handling
Browse files Browse the repository at this point in the history
- Add support for tool call functionality in Wenxin adapter
- Implement reasoning content logging
- Update message chunk conversion logic to handle new message types
- Refactor API request process to use model name directly
- Remove unnecessary access token handling
  • Loading branch information
dingyi222666 committed Feb 11, 2025
1 parent 7a2b2d6 commit d06fad7
Show file tree
Hide file tree
Showing 9 changed files with 222 additions and 265 deletions.
4 changes: 4 additions & 0 deletions packages/openai-like-adapter/src/requester.ts
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,10 @@ export class OpenAIRequester
}
}
}

if (reasoningContent.length > 0) {
logger.debug(`reasoning content: ${reasoningContent}`)
}
} catch (e) {
if (e instanceof ChatLunaError) {
throw e
Expand Down
2 changes: 1 addition & 1 deletion packages/openai-like-adapter/src/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ export function langchainMessageToOpenAIMessage(
const result: ChatCompletionResponseMessage[] = []

for (const rawMessage of messages) {
const role = messageTypeToOpenAIRole(rawMessage._getType())
const role = messageTypeToOpenAIRole(rawMessage.getType())

const msg = {
content: (rawMessage.content as string) || null,
Expand Down
89 changes: 24 additions & 65 deletions packages/wenxin-adapter/src/client.ts
Original file line number Diff line number Diff line change
Expand Up @@ -40,72 +40,31 @@ export class WenxinClient extends PlatformModelAndEmbeddingsClient<ClientConfig>
await this.getModels()
}

/* // eslint-disable-next-line @typescript-eslint/naming-convention
'ERNIE-4.0': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k
'ERNIE-4.0-turbo': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-4.0-turbo-8k?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions
'ERNIE-3.5': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-3.5-128k
'ERNIE-3.5-128k': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-3.5-128k?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-speed-pro-128k
'ERNIE-speed-pro': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-speed-pro-128k?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie_speed
'ERNIE-speed': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie_speed?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-speed-128k
'ERNIE-speed-128k': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-speed-128k?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-8k
'ERNIE-lite': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-8k?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-pro-128k
'ERNIE-lite-pro': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-lite-pro-128k?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-tiny-8k
'ERNIE-tiny': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-tiny-8k?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-novel-8k
'ERNIE-novel': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-novel-8k?access_token=${accessToken}`
},
// https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-char-8k
'ERNIE-char': (accessToken: string) => {
return `https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/ernie-char-8k?access_token=${accessToken}`
} */

async refreshModels(): Promise<ModelInfo[]> {
const rawModels = [
['ERNIE-4.0', 8000],
['ERNIE-4.0-turbo', 8000],
['ERNIE-3.5', 4096],
['ERNIE-3.5-128k', 128000],
['ERNIE-speed-pro', 128000],
['ERNIE-speed', 4096],
['ERNIE-speed-128k', 128000],
['ERNIE-lite', 8000],
['ERNIE-lite-pro', 128000],
['ERNIE-tiny', 8000],
['ERNIE-novel', 8000],
['ERNIE-character', 8000]
['ernie-4.0-8k', 8000], // ERNIE-4.0-8K
['ernie-4.0-8k-preview', 8000], // ERNIE-4.0-8K-Preview
['ernie-4.0-8k-latest', 8000], // ERNIE-4.0-8K-Latest
['ernie-4.0-turbo-8k', 8000], // ERNIE-4.0-Turbo-8K
['ernie-4.0-turbo-8k-preview', 8000], // ERNIE-4.0-Turbo-8K-Preview
['ernie-4.0-turbo-8k-latest', 8000], // ERNIE-4.0-Turbo-8K-Latest
['ernie-4.0-turbo-128k', 128000], // ERNIE-4.0-Turbo-128K
['ernie-3.5-8k', 4096], // ERNIE-3.5-8K
['ernie-3.5-8k-preview', 4096], // ERNIE-3.5-8K-Preview
['ernie-3.5-128k', 128000], // ERNIE-3.5-128K
['ernie-speed-pro-128k', 128000], // ERNIE-Speed-Pro-128K
['ernie-speed-8k', 4096], // ERNIE-Speed-8K
['ernie-speed-128k', 128000], // ERNIE-Speed-128K
['ernie-character-8k', 8000], // ERNIE-Character-8K
['ernie-character-fiction-8k', 8000], // ERNIE-Character-Fiction-8K
['ernie-lite-8k', 8000], // ERNIE-Lite-8K
['ernie-lite-pro-128k', 128000], // ERNIE-Lite-Pro-128K
['ernie-tiny-8k', 8000], // ERNIE-Tiny-8K
['ernie-novel-8k', 8000], // ERNIE-Novel-8K
['deepseek-v3', 128000], // DeepSeek-V3 (未提供上下文大小)
['deepseek-r1', 128000], // DeepSeek-R1 (未提供上下文大小)
['deepseek-r1-distill-qwen-32b', 8000], // DeepSeek-R1-Distill-Qwen-32B (未提供上下文大小)
['deepseek-r1-distill-qwen-14b', 8000] // DeepSeek-R1-Distill-Qwen-14B (未提供上下文大小)
] as [string, number][]

return rawModels
Expand All @@ -120,7 +79,7 @@ export class WenxinClient extends PlatformModelAndEmbeddingsClient<ClientConfig>
})
.concat([
{
name: 'text-embedding',
name: 'embedding-v1',
type: ModelType.embeddings,
functionCall: false,
supportMode: ['all'],
Expand Down
23 changes: 12 additions & 11 deletions packages/wenxin-adapter/src/index.ts
Original file line number Diff line number Diff line change
@@ -1,18 +1,22 @@
import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat'
import { Context, Schema } from 'koishi'
import { Context, Logger, Schema } from 'koishi'
import { WenxinClient } from './client'
import { createLogger } from 'koishi-plugin-chatluna/utils/logger'

export let logger: Logger
export function apply(ctx: Context, config: Config) {
const plugin = new ChatLunaPlugin(ctx, config, 'wenxin')

logger = createLogger(ctx, 'chatluna-wenxin-adapter')

ctx.on('ready', async () => {
plugin.registerToService()

await plugin.parseConfig((config) => {
return config.apiKeys.map(([apiKey, apiEndpoint]) => {
return config.apiKeys.map((apiKey) => {
return {
apiKey,
apiEndpoint,
apiEndpoint: '',
platform: 'wenxin',
chatLimit: config.chatTimeLimit,
timeout: config.timeout,
Expand All @@ -32,7 +36,7 @@ export function apply(ctx: Context, config: Config) {
}

export interface Config extends ChatLunaPlugin.Config {
apiKeys: [string, string][]
apiKeys: string[]
maxTokens: number
temperature: number
presencePenalty: number
Expand All @@ -44,15 +48,12 @@ export const Config: Schema<Config> = Schema.intersect([
ChatLunaPlugin.Config,
Schema.object({
apiKeys: Schema.array(
Schema.tuple([
Schema.string().role('secret').required(),
Schema.string().role('secret').default('')
])
).default([['', '']])
Schema.string().role('secret').required()
).default([''])
}),
Schema.object({
maxTokens: Schema.number().min(16).max(128000).step(16).default(1024),
temperature: Schema.percent().min(0).max(1).step(0.1).default(0.8),
maxTokens: Schema.number().min(16).max(1280000).step(16).default(4096),
temperature: Schema.percent().min(0).max(2).step(0.1).default(0.8),
presencePenalty: Schema.number()
.min(1.0)
.max(2.0)
Expand Down
1 change: 0 additions & 1 deletion packages/wenxin-adapter/src/locales/en-US.schema.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ $inner:
$desc: 'Baidu Qianfan platform auth params (API Key)'
$inner:
- 'API Key for Baidu Qianfan platform'
- 'Secret Key for Baidu Qianfan platform'

- $desc: 'Model Parameters'
maxTokens: 'Max input tokens (16-12000, multiple of 16). Note: >2000 for 8k+ token models'
Expand Down
3 changes: 1 addition & 2 deletions packages/wenxin-adapter/src/locales/zh-CN.schema.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,9 @@ $inner:
- {}
- $desc: 请求选项
apiKeys:
$desc: 百度千帆大模型平台应用的鉴权参数列表(API Key, Secret Key)
$desc: 百度千帆大模型平台应用的鉴权参数列表(API Key)。
$inner:
- 百度千帆大模型平台应用的 API Key
- 百度千帆大模型平台应用的 Secret Key

- $desc: 模型配置
maxTokens: 输入的最大上下文 Token(16~12000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。
Expand Down
Loading

0 comments on commit d06fad7

Please sign in to comment.