
Commit 204055c

Merge pull request #260 from Fanlu91/fix_fetchChatAPI
fix: remove unused fetchChatAPI
2 parents af2f07b + 397e5f4

File tree: 3 files changed (+36 −78 lines)

service/src/chatgpt/index.ts

Lines changed: 35 additions & 25 deletions
@@ -49,31 +49,32 @@ export async function initApi(key: KeyConfig, chatModel: CHATMODEL) {
     messageStore: undefined,
     getMessageById,
   }
-
-  // Set the token limits based on the model's type. This is because different models have different token limits.
-  // The token limit includes the token count from both the message array sent and the model response.
-  // 'gpt-35-turbo' has a limit of 4096 tokens, 'gpt-4' and 'gpt-4-32k' have limits of 8192 and 32768 tokens respectively.
-
-  // Check if the model type includes '16k'
-  if (model.toLowerCase().includes('16k')) {
-    // If it's a '16k' model, set the maxModelTokens to 16384 and maxResponseTokens to 4096
-    options.maxModelTokens = 16384;
-    options.maxResponseTokens = 4096;
-  } else if (model.toLowerCase().includes('32k')) {
-    // If it's a '32k' model, set the maxModelTokens to 32768 and maxResponseTokens to 8192
-    options.maxModelTokens = 32768;
-    options.maxResponseTokens = 8192;
-  } else if (model.toLowerCase().includes('gpt-4')) {
-    // If it's a 'gpt-4' model, set the maxModelTokens and maxResponseTokens to 8192 and 2048 respectively
-    options.maxModelTokens = 8192;
-    options.maxResponseTokens = 2048;
-  } else {
-    // If none of the above, use the default values, set the maxModelTokens and maxResponseTokens to 8192 and 2048 respectively
-    options.maxModelTokens = 4096;
-    options.maxResponseTokens = 1024;
-  }

+  // Set the token limits based on the model's type. This is because different models have different token limits.
+  // The token limit includes the token count from both the message array sent and the model response.
+  // 'gpt-35-turbo' has a limit of 4096 tokens, 'gpt-4' and 'gpt-4-32k' have limits of 8192 and 32768 tokens respectively.

+  // Check if the model type includes '16k'
+  if (model.toLowerCase().includes('16k')) {
+    // If it's a '16k' model, set the maxModelTokens to 16384 and maxResponseTokens to 4096
+    options.maxModelTokens = 16384
+    options.maxResponseTokens = 4096
+  }
+  else if (model.toLowerCase().includes('32k')) {
+    // If it's a '32k' model, set the maxModelTokens to 32768 and maxResponseTokens to 8192
+    options.maxModelTokens = 32768
+    options.maxResponseTokens = 8192
+  }
+  else if (model.toLowerCase().includes('gpt-4')) {
+    // If it's a 'gpt-4' model, set the maxModelTokens and maxResponseTokens to 8192 and 2048 respectively
+    options.maxModelTokens = 8192
+    options.maxResponseTokens = 2048
+  }
+  else {
+    // If none of the above, use the default values: maxModelTokens 4096 and maxResponseTokens 1024
+    options.maxModelTokens = 4096
+    options.maxResponseTokens = 1024
+  }

   if (isNotEmptyString(OPENAI_API_BASE_URL))
     options.apiBaseUrl = `${OPENAI_API_BASE_URL}/v1`
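
Not part of this commit, but a sketch of an alternative factoring of the branch above: the four substring/limit pairs can live in one ordered table, so the limits and their comments cannot drift apart. The TokenLimits interface and resolveTokenLimits helper are hypothetical names, not code from this repository.

interface TokenLimits {
  maxModelTokens: number
  maxResponseTokens: number
}

// Ordered table: the more specific substrings ('16k', '32k') must be tested
// before the generic 'gpt-4' match, mirroring the if/else chain in the diff.
const TOKEN_LIMITS: Array<[string, TokenLimits]> = [
  ['16k', { maxModelTokens: 16384, maxResponseTokens: 4096 }],
  ['32k', { maxModelTokens: 32768, maxResponseTokens: 8192 }],
  ['gpt-4', { maxModelTokens: 8192, maxResponseTokens: 2048 }],
]

function resolveTokenLimits(model: string): TokenLimits {
  const name = model.toLowerCase()
  for (const [needle, limits] of TOKEN_LIMITS) {
    if (name.includes(needle))
      return limits
  }
  // Default for 4k-context models such as 'gpt-35-turbo'.
  return { maxModelTokens: 4096, maxResponseTokens: 1024 }
}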
@@ -262,12 +263,21 @@ async function fetchBalance() {
     console.error('您的账户已被封禁,请登录OpenAI进行查看。')
     return
   }
-  const subscriptionData = await response.json()
+  interface SubscriptionData {
+    hard_limit_usd?: number
+    // Other possible properties can be added here
+  }
+  const subscriptionData: SubscriptionData = await response.json()
   const totalAmount = subscriptionData.hard_limit_usd

+  interface UsageData {
+    total_usage?: number
+    // Other possible properties can be added here
+  }
+
   // Get the amount already used
   response = await fetch(urlUsage, { agent: socksAgent === undefined ? httpsAgent : socksAgent, headers })
-  const usageData = await response.json()
+  const usageData: UsageData = await response.json()
   const totalUsage = usageData.total_usage / 100

   // Calculate the remaining quota
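
The two interfaces above give the otherwise untyped response.json() results a declared shape for exactly the fields fetchBalance reads. A minimal sketch of the same pattern in isolation; getHardLimit, its parameters, and the header literal are illustrative stand-ins for values the surrounding code already has in scope:

interface SubscriptionData {
  hard_limit_usd?: number
}

async function getHardLimit(urlSubscription: string, apiKey: string): Promise<number> {
  const response = await fetch(urlSubscription, {
    headers: { Authorization: `Bearer ${apiKey}` },
  })
  // Annotating the parsed body lets the compiler check the property access
  // below; the optional field forces an explicit fallback for missing data.
  const data: SubscriptionData = await response.json()
  return data.hard_limit_usd ?? 0
}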

service/src/index.ts

Lines changed: 1 addition & 41 deletions
@@ -3,7 +3,7 @@ import jwt from 'jsonwebtoken'
 import * as dotenv from 'dotenv'
 import { ObjectId } from 'mongodb'
 import type { RequestProps } from './types'
-import type { ChatContext, ChatMessage } from './chatgpt'
+import type { ChatMessage } from './chatgpt'
 import { abortChatProcess, chatConfig, chatReplyProcess, containsSensitiveWords, initAuditService } from './chatgpt'
 import { auth, getUserId } from './middleware/auth'
 import { clearApiKeyCache, clearConfigCache, getApiKeys, getCacheApiKeys, getCacheConfig, getOriginConfig } from './storage/config'
@@ -335,46 +335,6 @@ router.post('/chat-clear', auth, async (req, res) => {
   }
 })

-router.post('/chat', auth, async (req, res) => {
-  try {
-    const { roomId, uuid, regenerate, prompt, options = {} } = req.body as
-      { roomId: number; uuid: number; regenerate: boolean; prompt: string; options?: ChatContext }
-    const message = regenerate
-      ? await getChat(roomId, uuid)
-      : await insertChat(uuid, prompt, roomId, options as ChatOptions)
-    const response = await chatReply(prompt, options)
-    if (response.status === 'Success') {
-      if (regenerate && message.options.messageId) {
-        const previousResponse = message.previousResponse || []
-        previousResponse.push({ response: message.response, options: message.options })
-        await updateChat(message._id as unknown as string,
-          response.data.text,
-          response.data.id,
-          response.data.detail?.usage as UsageResponse,
-          previousResponse as [])
-      }
-      else {
-        await updateChat(message._id as unknown as string,
-          response.data.text,
-          response.data.id,
-          response.data.detail?.usage as UsageResponse)
-      }
-
-      if (response.data.usage) {
-        await insertChatUsage(new ObjectId(req.headers.userId as string),
-          roomId,
-          message._id,
-          response.data.id,
-          response.data.detail?.usage as UsageResponse)
-      }
-    }
-    res.send(response)
-  }
-  catch (error) {
-    res.send(error)
-  }
-})
-
 router.post('/chat-process', [auth, limiter], async (req, res) => {
   res.setHeader('Content-type', 'application/octet-stream')
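
The deleted /chat route buffered a single JSON reply (res.send(response)); the surviving /chat-process route streams 'application/octet-stream' chunks instead, as its context line above shows. A minimal sketch of a client consuming such a stream; the URL, token handling, and request body are assumptions for illustration, not this project's actual client code:

async function readChatStream(prompt: string, token: string): Promise<string> {
  const response = await fetch('/api/chat-process', {
    method: 'POST',
    headers: { 'Authorization': `Bearer ${token}`, 'Content-Type': 'application/json' },
    body: JSON.stringify({ prompt }),
  })
  if (!response.body)
    throw new Error('streaming not supported by this fetch implementation')
  const reader = response.body.getReader()
  const decoder = new TextDecoder()
  let text = ''
  while (true) {
    const { value, done } = await reader.read()
    if (done)
      break
    // Assumes the server writes incremental text chunks; accumulate them.
    text += decoder.decode(value, { stream: true })
  }
  return text
}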

src/api/index.ts

Lines changed: 0 additions & 12 deletions
@@ -3,18 +3,6 @@ import { get, post } from '@/utils/request'
 import type { AuditConfig, CHATMODEL, ConfigState, KeyConfig, MailConfig, SiteConfig, Status, UserRole } from '@/components/common/Setting/model'
 import { useAuthStore, useSettingStore } from '@/store'

-export function fetchChatAPI<T = any>(
-  prompt: string,
-  options?: { conversationId?: string; parentMessageId?: string },
-  signal?: GenericAbortSignal,
-) {
-  return post<T>({
-    url: '/chat',
-    data: { prompt, options },
-    signal,
-  })
-}
-
 export function fetchChatConfig<T = any>() {
   return post<T>({
     url: '/config',
