Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Vapi/聊天接口(通用).md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
\# 聊天接口(通用) ## OpenAPI Specification ```yaml openapi: 3.0.1 info: title: '' description: '' version: 1.0.0 paths: /v1/chat/completions: post: summary: 聊天接口(通用) deprecated: false description: >- 只提供简单的请求示例,更详细的API接口使用说明 [请阅读官方文档](https://platform.openai.com/docs/api-reference/chat) 注意:OpenAI、Claude、Gemini...等聊天模型均使用该接口格式。 tags: - 聊天模型(Chat) parameters: - name: Content-Type in: header description: '' required: true example: application/json schema: type: string requestBody: content: application/json: schema: type: object properties: model: type: string title: 模型名称 x-apifox-mock: gpt-4o messages: type: array items: type: object properties: role: type: string title: 角色 system | user x-apifox-mock: user content: type: string title: 提问消息 x-apifox-mock: 早上好! x-apifox-orders: - role - content required: - role - content temperature: type: integer description: >- 使用什么采样温度,介于 0 和 2 之间。较高的值(如 0.8)将使输出更加随机,而较低的值(如 0.2)将使输出更加集中和确定。 我们通常建议改变这个或`top_p`但不是两者同时使用。 title: 温度 top_p: type: integer description: >- 一种替代温度采样的方法,称为核采样,其中模型考虑具有 top_p 概率质量的标记的结果。所以 0.1 意味着只考虑构成前 10% 概率质量的标记。 我们通常建议改变这个或`temperature`但不是两者同时使用。 stream: type: boolean title: 流式输出 description: 流式输出或非流式输出 max_tokens: type: number description: 聊天完成时生成的最大Tokens数量。 输入标记和生成标记的总长度受模型上下文长度的限制。 title: 最大回复 'n': type: number description: 为每个输入消息生成多少个聊天完成选项。 presence_penalty: type: integer description: '-2.0 和 2.0 之间的数字。正值会根据到目前为止是否出现在文本中来惩罚新标记,从而增加模型谈论新主题的可能性。' frequency_penalty: type: integer description: '-2.0 和 2.0 之间的数字。正值会根据新标记在文本中的现有频率对其进行惩罚,从而降低模型逐字重复同一行的可能性。' logit_bias: type: 'null' description: >- 修改指定标记出现在完成中的可能性。 接受一个 json 对象,该对象将标记(由标记器中的标记 ID 指定)映射到从 -100 到 100 的关联偏差值。从数学上讲,偏差会在采样之前添加到模型生成的 logits 中。确切的效果因模型而异,但 -1 和 1 之间的值应该会减少或增加选择的可能性;像 -100 或 100 这样的值应该导致相关令牌的禁止或独占选择。 user: type: string description: >- 代表您的最终用户的唯一标识符,可以帮助 OpenAI 监控和检测滥用行为。[了解更多](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids) stop: type: array items: type: string description: 传入该参数后,当遇到匹配的关键词 
请求自动停止并完成。可传入字符串或字符串数组。 x-apifox-orders: - model - messages - temperature - top_p - max_tokens - stream - 'n' - presence_penalty - frequency_penalty - logit_bias - stop - user required: - model - messages example: model: gpt-4o messages: - role: user content: 晚上好 max_tokens: 1688 temperature: 0.5 stream: false responses: '200': description: '' content: application/json: schema: type: object properties: id: type: string object: type: string created: type: integer model: type: string choices: type: array items: type: object properties: index: type: integer message: type: object properties: role: type: string content: type: string refusal: type: 'null' required: - role - content - refusal x-apifox-orders: - role - content - refusal logprobs: type: 'null' finish_reason: type: string x-apifox-orders: - index - message - logprobs - finish_reason usage: type: object properties: prompt_tokens: type: integer completion_tokens: type: integer total_tokens: type: integer required: - prompt_tokens - completion_tokens - total_tokens x-apifox-orders: - prompt_tokens - completion_tokens - total_tokens system_fingerprint: type: string required: - id - object - created - model - choices - usage - system_fingerprint x-apifox-orders: - id - object - created - model - choices - usage - system_fingerprint example: id: chatcmpl-A1iMgDLzZtUJ9QDpfqDLxKH0zfUnp object: chat.completion created: 1724972230 model: gpt-4o-2024-05-13 choices: - index: 0 message: role: assistant content: 晚上好!有什么我可以帮你的吗? refusal: null logprobs: null finish_reason: stop usage: prompt_tokens: 9 completion_tokens: 10 total_tokens: 19 system_fingerprint: fp_157b3831f5 headers: {} x-apifox-name: 成功 security: - bearer: [] x-apifox-folder: 聊天模型(Chat) x-apifox-status: released x-run-in-apifox: https://app.apifox.com/web/project/5076588/apis/api-210153849-run components: schemas: {} securitySchemes: bearer: type: http scheme: bearer servers: - url: https://api.gpt.ge description: 线上 security: - bearer: [] ```
328 changes: 162 additions & 166 deletions aitoearn_web/server/aitoearn-geteway/src/core/tools/ai.service.ts
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,50 @@ export enum FireflycardTempTypes {
}
@Injectable()
export class AiToolsService {
openai: OpenAI
private baseURL: string = 'https://api.gpt.ge' // 新的API基础URL

/**
 * The service no longer builds an OpenAI SDK client; all model calls go
 * through plain HTTP (`fetch`) against `this.baseURL` — see chatCompletion.
 * @param redisService injected Redis accessor (unused in the visible code;
 *        presumably used by methods outside this view — TODO confirm)
 */
constructor(private readonly redisService: RedisService) {
  // Intentionally empty: requests are issued directly via fetch, so there
  // is no client object to initialize here.
}

/**
* 通用聊天接口调用
*/
/**
 * Shared helper: POST a chat-completion request to `${baseURL}/v1/chat/completions`
 * (OpenAI-compatible endpoint) and return the parsed JSON body.
 *
 * @param params.model       model name; falls back to 'gpt-4o' when empty
 * @param params.messages    chat history in {role, content} form
 * @param params.temperature sampling temperature (default 0.7)
 * @param params.max_tokens  completion token budget (default 1000)
 * @param params.stream      request streaming output (default false).
 *                           NOTE(review): when true the endpoint returns SSE
 *                           chunks, which `response.json()` below cannot
 *                           parse — confirm callers never set this.
 * @returns the endpoint's JSON response (shape depends on the remote API,
 *          hence `any`)
 * @throws Error when the HTTP status is not ok, or on network/parse failure
 */
private async chatCompletion(params: {
  model: string;
  messages: Array<{ role: string; content: string }>;
  temperature?: number;
  max_tokens?: number;
  stream?: boolean;
}): Promise<any> {
  const requestBody = {
    model: params.model || 'gpt-4o',
    messages: params.messages,
    // `??` instead of `||`: a caller-supplied temperature of 0 (fully
    // deterministic sampling) is a meaningful value and must not be
    // replaced by the 0.7 default.
    temperature: params.temperature ?? 0.7,
    max_tokens: params.max_tokens ?? 1000,
    stream: params.stream ?? false,
  };

  try {
    const response = await fetch(`${this.baseURL}/v1/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${config.ai.qwenKey}`, // reuse the existing key
      },
      body: JSON.stringify(requestBody),
    });

    if (!response.ok) {
      // Surface the response body in the error — upstream APIs put the
      // actual failure reason there; best-effort, never throws itself.
      const errBody = await response.text().catch(() => '');
      throw new Error(
        `HTTP error! status: ${response.status}${errBody ? ` body: ${errBody}` : ''}`,
      );
    }

    return await response.json();
  } catch (error) {
    console.error('Chat completion error:', error);
    throw error; // rethrow so callers decide how to degrade
  }
}

/**
Expand All @@ -44,56 +81,29 @@ export class AiToolsService {
* @param max
* @returns
*/
async videoAiTitle(url: string, min?: 5, max?: 50) {
const completion = await this.openai.chat.completions.create({
model: 'qwen-omni-turbo', // 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
stream: true,
messages: [
{
role: 'system',
content: [
{
type: 'text',
text: '你是一个短视频创作者,请帮我作品进行智能标题设置,只需要返回标题',
},
],
},
{
role: 'user',
content: [
{
type: 'video_url',
video_url: {
url,
},
},
{
type: 'text',
text: `给视频设置一个标题,长度为${min}到${max}个字. 只需要返回该标题`,
},
] as any,
},
],
stream_options: {
include_usage: true,
},
modalities: ['text'],
})
let res = ''

for await (const chunk of completion) {
if (Array.isArray(chunk.choices) && chunk.choices.length > 0) {
const content = chunk.choices[0].delta.content
if (content === null)
return res
res += content
}
else {
console.log(chunk.usage)
}
/**
 * Generate a title for a short video via the chat endpoint.
 *
 * @param url video URL — sent to the model as plain text inside the prompt;
 *            the video itself is NOT uploaded, so the model can only judge
 *            from the URL string (NOTE(review): confirm this is intended)
 * @param min minimum title length in characters (default 5)
 * @param max maximum title length in characters (default 50)
 * @returns the generated title, or '' on any failure (errors are swallowed
 *          after logging so callers get a safe fallback)
 */
async videoAiTitle(url: string, min = 5, max = 50): Promise<string> {
  try {
    const result = await this.chatCompletion({
      model: 'gpt-4o',
      messages: [
        {
          role: 'system',
          content: '你是一个短视频创作者,请帮我作品进行智能标题设置,只需要返回标题',
        },
        {
          role: 'user',
          content: `给视频设置一个标题,长度为${min}到${max}个字. 只需要返回该标题。视频URL: ${url}`,
        },
      ],
      temperature: 0.8,
      // Small buffer over the character cap. NOTE(review): tokens are not
      // 1:1 with Chinese characters — a tight budget may truncate output.
      max_tokens: max + 20,
    });

    return result?.choices?.[0]?.message?.content || '';
  } catch (error) {
    console.error('videoAiTitle error:', error);
    return '';
  }
}

/**
Expand All @@ -110,52 +120,29 @@ export class AiToolsService {
desc = '无',
max = 50,
): Promise<string> {
const completion = await this.openai.chat.completions.create({
stream: true,
model: 'qwen-omni-turbo',
messages: [
{
role: 'system',
content: `你是我的好友,请对我发的短视频的作品或者朋友圈短视频作品进行评论,我会提供作品的封面图. 请用中文回复,并且回复内容不超过${max}字.只需要返回评论内容.`,
},
{
role: 'user',
content: [
{
type: 'image_url',
image_url: {
url: imgUrl,
},
},
{
type: 'text',
text: `作品标题: ${title}, 作品描述: ${desc}`,
},
],
},
],
stream_options: {
include_usage: true,
},
modalities: ['text'],
})

let res = ''

for await (const chunk of completion) {
if (Array.isArray(chunk.choices) && chunk.choices.length > 0) {
console.log(chunk.choices[0].delta)
const content = chunk.choices[0].delta.content
if (content === null)
return res
res += content
}
else {
console.log(chunk.usage)
}
try {
// 注意:图片处理可能需要特殊处理,这里简化处理
const result = await this.chatCompletion({
model: 'gpt-4o',
messages: [
{
role: 'system',
content: `你是我的好友,请对我发的短视频的作品或者朋友圈短视频作品进行评论,我会提供作品的信息. 请用中文回复,并且回复内容不超过${max}字.只需要返回评论内容.`,
},
{
role: 'user',
content: `作品标题: ${title}, 作品描述: ${desc}, 图片URL: ${imgUrl}`,
},
],
temperature: 0.8,
max_tokens: max + 20,
});

return result?.choices?.[0]?.message?.content || '';
} catch (error) {
console.error('reviewImgByAi error:', error);
return '';
}

return res
}

// 智能评论
Expand All @@ -164,41 +151,28 @@ export class AiToolsService {
desc = '无',
max = 50,
): Promise<string> {
const completion = await this.openai.chat.completions.create({
stream: true,
model: 'qwen-omni-turbo',
messages: [
{
role: 'system',
content: `你是我的好友,请对我发的短视频作品或者朋友圈作品进行评论. 请用中文回复,并且回复内容不超过${max}字.只需要返回评论内容.`,
},
{
role: 'user',
content: `作品标题: ${title}, 作品描述: ${desc}`,
},
],
stream_options: {
include_usage: true,
},
modalities: ['text'],
})

let res = ''

for await (const chunk of completion) {
if (Array.isArray(chunk.choices) && chunk.choices.length > 0) {
console.log(chunk.choices[0].delta)
const content = chunk.choices[0].delta.content
if (content === null)
return res
res += content
}
else {
console.log(chunk.usage)
}
try {
const result = await this.chatCompletion({
model: 'gpt-4o',
messages: [
{
role: 'system',
content: `你是我的好友,请对我发的短视频作品或者朋友圈作品进行评论. 请用中文回复,并且回复内容不超过${max}字.只需要返回评论内容.`,
},
{
role: 'user',
content: `作品标题: ${title}, 作品描述: ${desc}`,
},
],
temperature: 0.8,
max_tokens: max + 20,
});

return result?.choices?.[0]?.message?.content || '';
} catch (error) {
console.error('reviewAi error:', error);
return '';
}

return res
}

/**
Expand All @@ -214,41 +188,63 @@ export class AiToolsService {
desc = '无',
max = 50,
): Promise<string> {
const completion = await this.openai.chat.completions.create({
stream: true,
model: 'qwen-omni-turbo',
messages: [
{
role: 'system',
content: `你是一个风趣幽默又有分寸的文字创作者的助手,请帮我对别人对我作品的评论进行回复. 请用中文回复,并且回复内容不超过${max}字.只需要返回你的回复内容.`,
},
{
role: 'user',
content: `作品标题: ${title}, 作品描述: ${desc}, 评论内容: ${content}`,
},
],
stream_options: {
include_usage: true,
},
modalities: ['text'],
})

let res = ''

for await (const chunk of completion) {
if (Array.isArray(chunk.choices) && chunk.choices.length > 0) {
console.log(chunk.choices[0].delta)
const content = chunk.choices[0].delta.content
if (content === null)
return res
res += content
}
else {
console.log(chunk.usage)
}
try {
const result = await this.chatCompletion({
model: 'gpt-4o',
messages: [
{
role: 'system',
content: `你是一个风趣幽默又有分寸的文字创作者的助手,请帮我对别人对我作品的评论进行回复. 请用中文回复,并且回复内容不超过${max}字.只需要返回你的回复内容.`,
},
{
role: 'user',
content: `作品标题: ${title}, 作品描述: ${desc}, 评论内容: ${content}`,
},
],
temperature: 0.7,
max_tokens: max + 20,
});

return result?.choices?.[0]?.message?.content || '';
} catch (error) {
console.error('reviewAiRecover error:', error);
return '';
}
}

return res
/**
* 根据主旨和提示词生成评论
* @param subject 评论主旨
* @param prompt 提示词
* @param max 最大字数
*/
/**
 * Generate a comment from a subject, steered by a system prompt.
 *
 * @param subject gist/topic the comment should address
 * @param prompt  system instruction controlling tone (defaults to a
 *                "friendly, natural comment" request)
 * @param max     soft cap on comment length in characters (default 100)
 * @returns the generated comment, or '' when the request fails
 */
async generateComment(
  subject: string,
  prompt: string = '请为以下内容生成一条友善、自然的评论',
  max = 100,
): Promise<string> {
  const messages = [
    { role: 'system', content: prompt },
    { role: 'user', content: `评论主旨: ${subject}` },
  ];

  try {
    const completion = await this.chatCompletion({
      model: 'gpt-4o',
      messages,
      temperature: 0.8,
      max_tokens: max + 20, // buffer over the character cap
    });
    return completion?.choices?.[0]?.message?.content || '';
  } catch (err) {
    // Degrade to an empty string; callers treat '' as "no comment".
    console.error('generateComment error:', err);
    return '';
  }
}

/**
Expand Down
Loading