Skip to content

Commit 3c682c6

Browse files
author
Kerwin
committed
fix: 优化随机key的逻辑 (Close #204)
1 parent 5a2b929 commit 3c682c6

File tree

3 files changed

+25
-40
lines changed

3 files changed

+25
-40
lines changed

service/src/chatgpt/index.ts

Lines changed: 18 additions & 33 deletions
Original file line number | Diff line number | Diff line change
@@ -31,6 +31,7 @@ const ErrorCodeMessage: Record<string, string> = {
3131
}
3232

3333
let auditService: TextAuditService
34+
const _lockedKeys: { key: string; lockedTime: number }[] = []
3435

3536
export async function initApi(key: KeyConfig, chatModel: CHATMODEL) {
3637
// More Info: https://github.com/transitive-bullshit/chatgpt-api
@@ -84,8 +85,8 @@ export async function initApi(key: KeyConfig, chatModel: CHATMODEL) {
8485
const processThreads: { userId: string; abort: AbortController; messageId: string }[] = []
8586
async function chatReplyProcess(options: RequestOptions) {
8687
const model = options.chatModel
87-
const key = options.key
88-
const userId = options.userId
88+
const key = await getRandomApiKey(options.user, options.user.config.chatModel)
89+
const userId = options.user._id.toString()
8990
const messageId = options.messageId
9091
if (key == null || key === undefined)
9192
throw new Error('没有可用的配置。请再试一次 | No available configuration. Please try again.')
@@ -124,13 +125,20 @@ async function chatReplyProcess(options: RequestOptions) {
124125
}
125126
catch (error: any) {
126127
const code = error.statusCode
127-
global.console.log(error)
128+
if (code === 429 && (error.message.includes('Too Many Requests') || error.message.includes('Rate limit'))) {
129+
// access token Only one message at a time
130+
if (options.tryCount++ < 3) {
131+
_lockedKeys.push({ key: key.key, lockedTime: Date.now() })
132+
await new Promise(resolve => setTimeout(resolve, 2000))
133+
return await chatReplyProcess(options)
134+
}
135+
}
136+
global.console.error(error)
128137
if (Reflect.has(ErrorCodeMessage, code))
129138
return sendResponse({ type: 'Fail', message: ErrorCodeMessage[code] })
130139
return sendResponse({ type: 'Fail', message: error.message ?? 'Please check the back-end console' })
131140
}
132141
finally {
133-
releaseApiKey(key)
134142
const index = processThreads.findIndex(d => d.userId === userId)
135143
if (index > -1)
136144
processThreads.splice(index, 1)
@@ -326,29 +334,23 @@ async function getMessageById(id: string): Promise<ChatMessage | undefined> {
326334
else { return undefined }
327335
}
328336

329-
const _lockedKeys: { key: string; count: number }[] = []
330-
const _oneTimeCount = 3 // api
331337
async function randomKeyConfig(keys: KeyConfig[]): Promise<KeyConfig | null> {
332338
if (keys.length <= 0)
333339
return null
334-
let unsedKeys = keys.filter(d => _lockedKeys.filter(l => d.key === l.key).length <= 0
335-
|| _lockedKeys.filter(l => d.key === l.key)[0].count < _oneTimeCount)
340+
// cleanup old locked keys
341+
_lockedKeys.filter(d => d.lockedTime <= Date.now() - 1000 * 20).forEach(d => _lockedKeys.splice(_lockedKeys.indexOf(d), 1))
342+
343+
let unsedKeys = keys.filter(d => _lockedKeys.filter(l => d.key === l.key).length <= 0)
336344
const start = Date.now()
337345
while (unsedKeys.length <= 0) {
338346
if (Date.now() - start > 3000)
339347
break
340348
await new Promise(resolve => setTimeout(resolve, 1000))
341-
unsedKeys = keys.filter(d => _lockedKeys.filter(l => d.key === l.key).length <= 0
342-
|| _lockedKeys.filter(l => d.key === l.key)[0].count < _oneTimeCount)
349+
unsedKeys = keys.filter(d => _lockedKeys.filter(l => d.key === l.key).length <= 0)
343350
}
344351
if (unsedKeys.length <= 0)
345352
return null
346353
const thisKey = unsedKeys[Math.floor(Math.random() * unsedKeys.length)]
347-
const thisLockedKey = _lockedKeys.filter(d => d.key === thisKey.key)
348-
if (thisLockedKey.length <= 0)
349-
_lockedKeys.push({ key: thisKey.key, count: 1 })
350-
else
351-
thisLockedKey[0].count++
352354
return thisKey
353355
}
354356

@@ -357,23 +359,6 @@ async function getRandomApiKey(user: UserInfo, chatModel: CHATMODEL): Promise<Ke
357359
return randomKeyConfig(keys.filter(d => d.chatModels.includes(chatModel)))
358360
}
359361

360-
async function releaseApiKey(key: KeyConfig) {
361-
if (key == null || key === undefined)
362-
return
363-
364-
const lockedKeys = _lockedKeys.filter(d => d.key === key.key)
365-
if (lockedKeys.length > 0) {
366-
if (lockedKeys[0].count <= 1) {
367-
const index = _lockedKeys.findIndex(item => item.key === key.key)
368-
if (index !== -1)
369-
_lockedKeys.splice(index, 1)
370-
}
371-
else {
372-
lockedKeys[0].count--
373-
}
374-
}
375-
}
376-
377362
export type { ChatContext, ChatMessage }
378363

379-
export { chatReplyProcess, chatConfig, containsSensitiveWords, getRandomApiKey }
364+
export { chatReplyProcess, chatConfig, containsSensitiveWords }

service/src/chatgpt/types.ts

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
import type { ChatMessage } from 'chatgpt'
2-
import type { CHATMODEL, KeyConfig } from 'src/storage/model'
2+
import type { CHATMODEL, UserInfo } from 'src/storage/model'
33

44
export interface RequestOptions {
55
message: string
@@ -9,9 +9,9 @@ export interface RequestOptions {
99
temperature?: number
1010
top_p?: number
1111
chatModel: CHATMODEL
12-
key: KeyConfig
13-
userId: string
12+
user: UserInfo
1413
messageId: string
14+
tryCount: number
1515
}
1616

1717
export interface BalanceResponse {

service/src/index.ts

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -4,7 +4,7 @@ import * as dotenv from 'dotenv'
44
import { ObjectId } from 'mongodb'
55
import type { RequestProps } from './types'
66
import type { ChatContext, ChatMessage } from './chatgpt'
7-
import { abortChatProcess, chatConfig, chatReplyProcess, containsSensitiveWords, getRandomApiKey, initAuditService } from './chatgpt'
7+
import { abortChatProcess, chatConfig, chatReplyProcess, containsSensitiveWords, initAuditService } from './chatgpt'
88
import { auth, getUserId } from './middleware/auth'
99
import { clearApiKeyCache, clearConfigCache, getApiKeys, getCacheApiKeys, getCacheConfig, getOriginConfig } from './storage/config'
1010
import type { AuditConfig, CHATMODEL, ChatInfo, ChatOptions, Config, KeyConfig, MailConfig, SiteConfig, UsageResponse, UserInfo } from './storage/model'
@@ -430,9 +430,9 @@ router.post('/chat-process', [auth, limiter], async (req, res) => {
430430
temperature,
431431
top_p,
432432
chatModel: user.config.chatModel,
433-
key: await getRandomApiKey(user, user.config.chatModel),
434-
userId,
433+
user,
435434
messageId: message._id.toString(),
435+
tryCount: 0,
436436
})
437437
// return the whole response including usage
438438
res.write(`\n${JSON.stringify(result.data)}`)
@@ -480,7 +480,7 @@ router.post('/chat-process', [auth, limiter], async (req, res) => {
480480
}
481481
}
482482
catch (error) {
483-
global.console.log(error)
483+
global.console.error(error)
484484
}
485485
}
486486
})

0 commit comments

Comments (0)