5 changes: 5 additions & 0 deletions src/vs/workbench/api/common/extHostSearch.ts
@@ -158,6 +158,7 @@ export class ExtHostSearch implements IExtHostSearch {
}

$provideTextSearchResults(handle: number, session: number, rawQuery: IRawTextQuery, token: vscode.CancellationToken): Promise<ISearchCompleteStats> {
console.log('[ExtHostSearch] $provideTextSearchResults called', { handle, session, rawQuery });
const provider = this._textSearchProvider.get(handle);
if (!provider || !provider.provideTextSearchResults) {
throw new Error(`Unknown Text Search Provider ${handle}`);
@@ -169,11 +170,15 @@ export class ExtHostSearch implements IExtHostSearch {
}

$provideAITextSearchResults(handle: number, session: number, rawQuery: IRawAITextQuery, token: vscode.CancellationToken): Promise<ISearchCompleteStats> {
this._logService.info('[ExtHostSearch] $provideAITextSearchResults called', handle, session);

const provider = this._aiTextSearchProvider.get(handle);
if (!provider || !provider.provideAITextSearchResults) {
this._logService.error(`Unknown AI Text Search Provider ${handle}`);
throw new Error(`Unknown AI Text Search Provider ${handle}`);
}

this._logService.info('[ExtHostSearch] Provider found, creating TextSearchManager');
const query = reviveQuery(rawQuery);
const engine = this.createAITextSearchManager(query, provider);
return engine.search(progress => this._proxy.$handleTextMatch(handle, session, progress), token);
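Both hunks above follow the same handle-based registry pattern: the main thread addresses extension-host providers by a numeric handle, and an unknown handle is a hard error. A minimal self-contained sketch of that lookup (the provider interface here is simplified, not the real vscode one):

```typescript
// Illustrative sketch of the handle-based provider lookup used above;
// types are simplified stand-ins, not the real vscode interfaces.
interface SimpleSearchProvider {
	provideTextSearchResults(query: string): Promise<string[]>;
}

const providers = new Map<number, SimpleSearchProvider>();

function getProvider(handle: number): SimpleSearchProvider {
	const provider = providers.get(handle);
	if (!provider) {
		// mirrors the diff: an unknown handle is a hard error
		throw new Error(`Unknown Text Search Provider ${handle}`);
	}
	return provider;
}
```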
24 changes: 24 additions & 0 deletions src/vs/workbench/contrib/void/browser/chatThreadService.ts
@@ -802,6 +802,16 @@ class ChatThreadService extends Disposable implements IChatThreadService {
let resMessageIsDonePromise: (res: ResTypes) => void // resolves when user approves this tool use (or if tool doesn't require approval)
const messageIsDonePromise = new Promise<ResTypes>((res, rej) => { resMessageIsDonePromise = res })

// Log: browser side - LLM request starting
console.log('🔥 [Browser] LLM Chat Request Starting =====================================')
console.log('🔥 [Browser] Thread ID:', threadId)
console.log('🔥 [Browser] Chat Mode:', chatMode)
console.log('🔥 [Browser] Model Selection:', modelSelection)
console.log('🔥 [Browser] Messages Count:', messages.length)
console.log('🔥 [Browser] Separate System Message Length:', separateSystemMessage?.length || 0)
console.log('🔥 [Browser] Messages Preview:', messages.slice(-2)) // show the last 2 messages
console.log('🔥 [Browser] =====================================')

const llmCancelToken = this._llmMessageService.sendLLMMessage({
messagesType: 'chatMessages',
chatMode,
@@ -812,16 +822,30 @@
logging: { loggingName: `Chat - ${chatMode}`, loggingExtras: { threadId, nMessagesSent, chatMode } },
separateSystemMessage: separateSystemMessage,
onText: ({ fullText, fullReasoning, toolCall }) => {
// update state; don't log each streaming delta
this._setStreamState(threadId, { isRunning: 'LLM', llmInfo: { displayContentSoFar: fullText, reasoningSoFar: fullReasoning, toolCallSoFar: toolCall ?? null }, interrupt: Promise.resolve(() => { if (llmCancelToken) this._llmMessageService.abort(llmCancelToken) }) })
},
onFinalMessage: async ({ fullText, fullReasoning, toolCall, anthropicReasoning, }) => {
// Log: browser side - final response
console.log('🔥 [Browser] LLM Response Complete =====================================')
console.log('🔥 [Browser] Final Text Length:', fullText.length)
console.log('🔥 [Browser] Final Text:', fullText)
console.log('🔥 [Browser] Final Reasoning Length:', fullReasoning?.length || 0)
console.log('🔥 [Browser] Final Tool Call:', toolCall)
console.log('🔥 [Browser] Anthropic Reasoning:', anthropicReasoning)
console.log('🔥 [Browser] =====================================')
resMessageIsDonePromise({ type: 'llmDone', toolCall, info: { fullText, fullReasoning, anthropicReasoning } }) // resolve with tool calls
},
onError: async (error) => {
// Log: browser side - error
console.log('🔥 [Browser] LLM Error =====================================')
console.error('🔥 [Browser] Error:', error)
console.log('🔥 [Browser] =====================================')
resMessageIsDonePromise({ type: 'llmError', error: error })
},
onAbort: () => {
// stop the loop to free up the promise, but don't modify state (already handled by whatever stopped it)
console.log('🔥 [Browser] LLM Request Aborted')
resMessageIsDonePromise({ type: 'llmAborted' })
this._metricsService.capture('Agent Loop Done (Aborted)', { nMessagesSent, chatMode })
},
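The callbacks above all settle one shared promise: `resMessageIsDonePromise` is captured outside the `Promise` executor so whichever of `onFinalMessage`, `onError`, or `onAbort` fires first can resolve it. A minimal sketch of this deferred-promise pattern (names are illustrative):

```typescript
// Deferred-promise sketch: capture the resolver so later callbacks can
// settle the promise. Mirrors resMessageIsDonePromise above.
type ResTypes =
	| { type: 'llmDone'; fullText: string }
	| { type: 'llmError'; error: unknown }
	| { type: 'llmAborted' };

function makeDeferred<T>() {
	let resolve!: (value: T) => void;
	const promise = new Promise<T>(res => { resolve = res; });
	return { promise, resolve };
}

// Usage: the agent loop awaits a single promise; each callback resolves it.
const done = makeDeferred<ResTypes>();
// onFinalMessage -> done.resolve({ type: 'llmDone', fullText })
// onError        -> done.resolve({ type: 'llmError', error })
// onAbort        -> done.resolve({ type: 'llmAborted' })
```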
@@ -594,6 +594,16 @@ class ConvertToLLMMessageService extends Disposable implements IConvertToLLMMess

const persistentTerminalIDs = this.terminalToolService.listPersistentTerminalIds()
const systemMessage = chat_systemMessage({ workspaceFolders, openedURIs, directoryStr, activeURI, persistentTerminalIDs, chatMode, mcpTools, includeXMLToolDefinitions })

// Log: show the generated system message
console.log('📋 [System Message Generated] =====================================')
console.log('📋 [System Message Generated] Chat Mode:', chatMode)
console.log('📋 [System Message Generated] Workspace Folders:', workspaceFolders)
console.log('📋 [System Message Generated] Active URI:', activeURI)
console.log('📋 [System Message Generated] System Message Length:', systemMessage?.length || 0)
console.log('📋 [System Message Generated] System Message Preview:', systemMessage?.substring(0, 500) + (systemMessage?.length > 500 ? '...' : ''))
console.log('📋 [System Message Generated] =====================================')

return systemMessage
}
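One edge case the preview line above can hit: if `systemMessage` is undefined, `systemMessage?.substring(0, 500)` stringifies to the literal "undefined". A hypothetical helper (not in the PR) that sidesteps this:

```typescript
// Hypothetical helper, not part of the PR: truncate a possibly-undefined
// string for log previews without ever printing the literal "undefined".
function preview(text: string | undefined, max = 500): string {
	if (!text) return '';
	return text.length > max ? text.slice(0, max) + '...' : text;
}

// console.log('📋 [System Message Generated] Preview:', preview(systemMessage))
```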

14 changes: 13 additions & 1 deletion src/vs/workbench/contrib/void/browser/toolsService.ts
@@ -203,6 +203,7 @@ export class ToolsService implements IToolsService {
is_regex: isRegexUnknown,
page_number: pageNumberUnknown
} = params
console.log('[toolsService] validateParams.search_for_files called', { queryUnknown, searchInFolderUnknown, isRegexUnknown, pageNumberUnknown });
const queryStr = validateStr('query', queryUnknown)
const pageNumber = validatePageNum(pageNumberUnknown)
const searchInFolder = validateOptionalURI(searchInFolderUnknown)
@@ -295,27 +296,37 @@

this.callTool = {
read_file: async ({ uri, startLine, endLine, pageNumber }) => {
console.log('[read_file] callTool entry', { uri: uri.toString(), startLine, endLine, pageNumber });
await voidModelService.initializeModel(uri)
console.log('[read_file] after initializeModel');
const { model } = await voidModelService.getModelSafe(uri)
if (model === null) { throw new Error(`No contents; File does not exist.`) }
console.log('[read_file] got model', { hasModel: !!model });
if (model === null) {
console.error('[read_file] No contents; File does not exist.', uri);
throw new Error(`No contents; File does not exist.`)
}

let contents: string
if (startLine === null && endLine === null) {
contents = model.getValue(EndOfLinePreference.LF)
console.log('[read_file] getValue full', { length: contents.length });
}
else {
const startLineNumber = startLine === null ? 1 : startLine
const endLineNumber = endLine === null ? model.getLineCount() : endLine
contents = model.getValueInRange({ startLineNumber, startColumn: 1, endLineNumber, endColumn: Number.MAX_SAFE_INTEGER }, EndOfLinePreference.LF)
console.log('[read_file] getValueInRange', { startLineNumber, endLineNumber, length: contents.length });
}

const totalNumLines = model.getLineCount()
console.log('[read_file] totalNumLines', totalNumLines);

const fromIdx = MAX_FILE_CHARS_PAGE * (pageNumber - 1)
const toIdx = MAX_FILE_CHARS_PAGE * pageNumber - 1
const fileContents = contents.slice(fromIdx, toIdx + 1) // paginate
const hasNextPage = (contents.length - 1) - toIdx >= 1
const totalFileLen = contents.length
console.log('[read_file] pagination', { fromIdx, toIdx, fileContentsLen: fileContents.length, hasNextPage, totalFileLen });
return { result: { fileContents, totalFileLen, hasNextPage, totalNumLines } }
},
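The read_file handler paginates by characters, not lines. A self-contained sketch of the arithmetic (the page-size constant is assumed here; the real MAX_FILE_CHARS_PAGE is defined elsewhere in the codebase):

```typescript
// Character-based pagination sketch; the real constant lives elsewhere.
const MAX_FILE_CHARS_PAGE = 50_000; // assumed value, for illustration only

function paginate(contents: string, pageNumber: number) {
	const fromIdx = MAX_FILE_CHARS_PAGE * (pageNumber - 1);
	const toIdx = MAX_FILE_CHARS_PAGE * pageNumber - 1;      // inclusive last index of this page
	const fileContents = contents.slice(fromIdx, toIdx + 1); // slice's end is exclusive
	const hasNextPage = (contents.length - 1) - toIdx >= 1;  // any chars left after this page?
	return { fileContents, hasNextPage, totalFileLen: contents.length };
}

// A 120_000-char file with this page size yields pages of 50k, 50k, and
// 20k chars; hasNextPage is true for pages 1 and 2, false for page 3.
```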

@@ -349,6 +360,7 @@
},

search_for_files: async ({ query: queryStr, isRegex, searchInFolder, pageNumber }) => {
console.log('[toolsService] search_for_files called', { queryStr, isRegex, searchInFolder, pageNumber });
const searchFolders = searchInFolder === null ?
workspaceContextService.getWorkspace().folders.map(f => f.uri)
: [searchInFolder]
@@ -310,6 +310,16 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onE
// max_completion_tokens: maxTokens,
}

// Log: show the full request sent to the OpenAI API
console.log('🔄 [OpenAI-Compatible API Request] =====================================')
console.log('🔄 [OpenAI-Compatible API Request] Provider:', providerName)
console.log('🔄 [OpenAI-Compatible API Request] Model:', modelName)
console.log('🔄 [OpenAI-Compatible API Request] Options:', JSON.stringify(options, null, 2))
if (separateSystemMessage) {
console.log('🔄 [OpenAI-Compatible API Request] System Message:', separateSystemMessage)
}
console.log('🔄 [OpenAI-Compatible API Request] =====================================')

// open source models - manually parse think tokens
const { needsManualParse: needsManualReasoningParse, nameOfFieldInDelta: nameOfReasoningFieldInDelta } = providerReasoningIOSettings?.output ?? {}
const manuallyParseReasoning = needsManualReasoningParse && canIOReasoning && openSourceThinkTags
@@ -333,12 +343,21 @@
let toolId = ''
let toolParamsStr = ''

// Log: record the API call about to be made
console.log('🌐 [API Call Starting] OpenAI Chat Completions Create')
console.log('🌐 [API Call] Endpoint: chat/completions')
console.log('🌐 [API Call] Full Request Options:', JSON.stringify(options, null, 2))

openai.chat.completions
.create(options)
.then(async response => {
_setAborter(() => response.controller.abort())

console.log('🌐 [API Response] OpenAI stream started successfully')

// when receive text
for await (const chunk of response) {
// process the streaming response; don't log each chunk
// message
const newText = chunk.choices[0]?.delta?.content ?? ''
fullTextSoFar += newText
@@ -370,18 +389,41 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onE
})

}

// Log: OpenAI stream finished
console.log('🌐 [API Response] OpenAI stream completed')

// on final
if (!fullTextSoFar && !fullReasoningSoFar && !toolName) {
console.log('❌ [OpenAI Error] Empty response from model')
onError({ message: 'Void: Response from model was empty.', fullError: null })
}
else {
const toolCall = rawToolCallObjOfParamsStr(toolName, toolParamsStr, toolId)
const toolCallObj = toolCall ? { toolCall } : {}

// Log: OpenAI final response
console.log('✅ [OpenAI Final Message] =====================================')
console.log('✅ [OpenAI Final Message] Full Text:', fullTextSoFar)
console.log('✅ [OpenAI Final Message] Full Reasoning:', fullReasoningSoFar)
console.log('✅ [OpenAI Final Message] Tool Name:', toolName)
console.log('✅ [OpenAI Final Message] Tool Params:', toolParamsStr)
console.log('✅ [OpenAI Final Message] Tool Call Object:', toolCall)
console.log('✅ [OpenAI Final Message] =====================================')

onFinalMessage({ fullText: fullTextSoFar, fullReasoning: fullReasoningSoFar, anthropicReasoning: null, ...toolCallObj });
}
})
// when error/fail - this catches errors of both .create() and .then(for await)
.catch(error => {
// Log: OpenAI error
console.log('❌ [OpenAI Error] =====================================')
console.log('❌ [OpenAI Error] Error Object:', error)
console.log('❌ [OpenAI Error] Error Status:', error instanceof OpenAI.APIError ? error.status : 'N/A')
console.log('❌ [OpenAI Error] Error Message:', error.message || error.toString())
console.log('❌ [OpenAI Error] Error Stack:', error.stack)
console.log('❌ [OpenAI Error] =====================================')

if (error instanceof OpenAI.APIError && error.status === 401) { onError({ message: invalidApiKeyMessage(providerName), fullError: error }); }
else { onError({ message: error + '', fullError: error }); }
})
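Around the logging added above, the stream loop concatenates delta fragments into `fullTextSoFar`, `toolName`, and `toolParamsStr`. A generic sketch of how such accumulation works against the openai SDK's chunk types (the PR's exact loop is partly elided here, so this is a sketch, not a verbatim copy):

```typescript
import OpenAI from 'openai';

// Generic accumulation sketch for OpenAI streaming chunks: text content
// and tool-call fragments arrive as deltas and are concatenated.
async function accumulate(
	stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>,
) {
	let fullText = '';
	let toolName = '';
	let toolParamsStr = '';
	for await (const chunk of stream) {
		const delta = chunk.choices[0]?.delta;
		fullText += delta?.content ?? '';
		const toolDelta = delta?.tool_calls?.[0];
		toolName += toolDelta?.function?.name ?? '';
		toolParamsStr += toolDelta?.function?.arguments ?? '';
	}
	return { fullText, toolName, toolParamsStr };
}
```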
@@ -484,15 +526,28 @@ const sendAnthropicChat = async ({ messages, providerName, onText, onFinalMessag
dangerouslyAllowBrowser: true
});

const stream = anthropic.messages.stream({
const anthropicRequestOptions = {
system: separateSystemMessage ?? undefined,
messages: messages as AnthropicLLMChatMessage[],
model: modelName,
max_tokens: maxTokens ?? 4_096, // anthropic requires this
...includeInPayload,
...nativeToolsObj,
}

})
// Log: show the full request sent to the Anthropic API
console.log('🤖 [Anthropic API Request] =====================================')
console.log('🤖 [Anthropic API Request] Provider:', providerName)
console.log('🤖 [Anthropic API Request] Model:', modelName)
console.log('🤖 [Anthropic API Request] Options:', JSON.stringify(anthropicRequestOptions, null, 2))
console.log('🤖 [Anthropic API Request] =====================================')

console.log('🌐 [API Call Starting] Anthropic Messages Stream')
console.log('🌐 [API Call] Endpoint: messages (streaming)')

const stream = anthropic.messages.stream(anthropicRequestOptions)

console.log('🌐 [API Response] Anthropic stream created successfully')

// manually parse out tool results if XML
if (!specialToolFormat) {
@@ -518,6 +573,8 @@
}
// there are no events for tool_use, it comes in at the end
stream.on('streamEvent', e => {
// Log: record each Anthropic stream event
console.log('🔄 [Anthropic Stream Event]', JSON.stringify(e, null, 2))
// start block
if (e.type === 'content_block_start') {
if (e.content_block.type === 'text') {
@@ -561,17 +618,31 @@

// on done - (or when error/fail) - this is called AFTER last streamEvent
stream.on('finalMessage', (response) => {
// Log: Anthropic final response
console.log('✅ [Anthropic Final Message] =====================================')
console.log('✅ [Anthropic Final Message] Raw Response:', JSON.stringify(response, null, 2))

const anthropicReasoning = response.content.filter(c => c.type === 'thinking' || c.type === 'redacted_thinking')
const tools = response.content.filter(c => c.type === 'tool_use')
// console.log('TOOLS!!!!!!', JSON.stringify(tools, null, 2))
// console.log('TOOLS!!!!!!', JSON.stringify(response, null, 2))
const toolCall = tools[0] && rawToolCallObjOfAnthropicParams(tools[0])
const toolCallObj = toolCall ? { toolCall } : {}

console.log('✅ [Anthropic Final Message] Processed - Full Text:', fullText)
console.log('✅ [Anthropic Final Message] Processed - Full Reasoning:', fullReasoning)
console.log('✅ [Anthropic Final Message] Processed - Tool Call:', toolCall)
console.log('✅ [Anthropic Final Message] =====================================')

onFinalMessage({ fullText, fullReasoning, anthropicReasoning, ...toolCallObj })
})
// on error
stream.on('error', (error) => {
// Log: Anthropic error
console.log('❌ [Anthropic Error] =====================================')
console.log('❌ [Anthropic Error] Error Object:', error)
console.log('❌ [Anthropic Error] Error Status:', error instanceof Anthropic.APIError ? error.status : 'N/A')
console.log('❌ [Anthropic Error] Error Message:', error.message || error.toString())
console.log('❌ [Anthropic Error] =====================================')

if (error instanceof Anthropic.APIError && error.status === 401) { onError({ message: invalidApiKeyMessage(providerName), fullError: error }) }
else { onError({ message: error + '', fullError: error }) }
})
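The finalMessage handler above partitions Anthropic's typed content blocks (thinking, redacted_thinking, tool_use). A minimal sketch of that post-processing against the @anthropic-ai/sdk message type:

```typescript
import Anthropic from '@anthropic-ai/sdk';

// Sketch of the finalMessage post-processing above: Anthropic responses
// are a list of typed content blocks, partitioned here by block type.
function partitionContent(response: Anthropic.Message) {
	const anthropicReasoning = response.content.filter(
		c => c.type === 'thinking' || c.type === 'redacted_thinking',
	);
	const toolUses = response.content.filter(c => c.type === 'tool_use');
	return { anthropicReasoning, firstToolUse: toolUses[0] ?? null };
}
```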
@@ -777,21 +848,36 @@ const sendGeminiChat = async ({
let toolParamsStr = ''
let toolId = ''


genAI.models.generateContentStream({
const geminiRequestOptions = {
model: modelName,
config: {
systemInstruction: separateSystemMessage,
thinkingConfig: thinkingConfig,
tools: toolConfig,
},
contents: messages as GeminiLLMChatMessage[],
})
}

// Log: show the full request sent to the Gemini API
console.log('🧠 [Gemini API Request] =====================================')
console.log('🧠 [Gemini API Request] Provider:', providerName)
console.log('🧠 [Gemini API Request] Model:', modelName)
console.log('🧠 [Gemini API Request] Options:', JSON.stringify(geminiRequestOptions, null, 2))
console.log('🧠 [Gemini API Request] =====================================')

console.log('🌐 [API Call Starting] Gemini Generate Content Stream')
console.log('🌐 [API Call] Endpoint: generateContentStream')

genAI.models.generateContentStream(geminiRequestOptions)
.then(async (stream) => {
_setAborter(() => { stream.return(fullTextSoFar); });

console.log('🌐 [API Response] Gemini stream created successfully')

// Process the stream
for await (const chunk of stream) {
// Log: record each Gemini stream chunk
console.log('🔄 [Gemini Stream Chunk]', JSON.stringify(chunk, null, 2))
// message
const newText = chunk.text ?? ''
fullTextSoFar += newText
Expand All @@ -815,17 +901,38 @@ const sendGeminiChat = async ({
})
}

// Log: Gemini stream finished
console.log('🌐 [API Response] Gemini stream completed')

// on final
if (!fullTextSoFar && !fullReasoningSoFar && !toolName) {
console.log('❌ [Gemini Error] Empty response from model')
onError({ message: 'Void: Response from model was empty.', fullError: null })
} else {
if (!toolId) toolId = generateUuid() // ids are empty, but other providers might expect an id
const toolCall = rawToolCallObjOfParamsStr(toolName, toolParamsStr, toolId)
const toolCallObj = toolCall ? { toolCall } : {}

// Log: Gemini final response
console.log('✅ [Gemini Final Message] =====================================')
console.log('✅ [Gemini Final Message] Full Text:', fullTextSoFar)
console.log('✅ [Gemini Final Message] Full Reasoning:', fullReasoningSoFar)
console.log('✅ [Gemini Final Message] Tool Name:', toolName)
console.log('✅ [Gemini Final Message] Tool Params:', toolParamsStr)
console.log('✅ [Gemini Final Message] Tool Call Object:', toolCall)
console.log('✅ [Gemini Final Message] =====================================')

onFinalMessage({ fullText: fullTextSoFar, fullReasoning: fullReasoningSoFar, anthropicReasoning: null, ...toolCallObj });
}
})
.catch(error => {
// Log: Gemini error
console.log('❌ [Gemini Error] =====================================')
console.log('❌ [Gemini Error] Error Object:', error)
console.log('❌ [Gemini Error] Error Message:', error?.message || error.toString())
console.log('❌ [Gemini Error] Error Stack:', error?.stack)
console.log('❌ [Gemini Error] =====================================')

const message = error?.message
if (typeof message === 'string') {

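For reference, the refactor above names the Gemini request object before the call so it can be logged. A minimal end-to-end sketch of the same call shape against the @google/genai SDK (model name and prompt are illustrative, not from the PR):

```typescript
import { GoogleGenAI } from '@google/genai';

// Minimal sketch of the generateContentStream call shape used above;
// the model name and prompt are illustrative assumptions.
const genAI = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY ?? '' });

async function main() {
	const geminiRequestOptions = {
		model: 'gemini-2.0-flash', // illustrative
		config: { systemInstruction: 'You are a helpful assistant.' },
		contents: 'Say hello in one word.',
	};
	const stream = await genAI.models.generateContentStream(geminiRequestOptions);
	for await (const chunk of stream) {
		process.stdout.write(chunk.text ?? ''); // same chunk.text field the loop above reads
	}
}

main();
```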