diff --git a/src/vs/workbench/api/common/extHostSearch.ts b/src/vs/workbench/api/common/extHostSearch.ts
index fb92adb1fc9..15ad3676bb4 100644
--- a/src/vs/workbench/api/common/extHostSearch.ts
+++ b/src/vs/workbench/api/common/extHostSearch.ts
@@ -158,6 +158,7 @@ export class ExtHostSearch implements IExtHostSearch {
 	}

 	$provideTextSearchResults(handle: number, session: number, rawQuery: IRawTextQuery, token: vscode.CancellationToken): Promise<ISearchCompleteStats> {
+		console.log('[ExtHostSearch] $provideTextSearchResults called', { handle, session, rawQuery });
 		const provider = this._textSearchProvider.get(handle);
 		if (!provider || !provider.provideTextSearchResults) {
 			throw new Error(`Unknown Text Search Provider ${handle}`);
 		}
@@ -169,11 +170,15 @@ export class ExtHostSearch implements IExtHostSearch {
 	}

 	$provideAITextSearchResults(handle: number, session: number, rawQuery: IRawAITextQuery, token: vscode.CancellationToken): Promise<ISearchCompleteStats> {
+		this._logService.info('[ExtHostSearch] $provideAITextSearchResults called', handle, session);
+
 		const provider = this._aiTextSearchProvider.get(handle);
 		if (!provider || !provider.provideAITextSearchResults) {
+			this._logService.error(`Unknown AI Text Search Provider ${handle}`);
 			throw new Error(`Unknown AI Text Search Provider ${handle}`);
 		}
+		this._logService.info('[ExtHostSearch] Provider found, creating TextSearchManager');
 		const query = reviveQuery(rawQuery);
 		const engine = this.createAITextSearchManager(query, provider);
 		return engine.search(progress => this._proxy.$handleTextMatch(handle, session, progress), token);
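[Review note] This patch mixes bare `console.log` with the injected `ILogService` (`this._logService.info/.error`) inside the same methods. If these traces need to outlive a quick debugging session, routing them through one gated helper keeps the output switchable from a single place. A minimal TypeScript sketch; `VOID_DEBUG` is an assumed flag name, not something this patch defines:

	// debugLog.ts - gate all ad-hoc tracing behind one flag (sketch; VOID_DEBUG is an assumed env var)
	const DEBUG = typeof process !== 'undefined' && process.env['VOID_DEBUG'] === '1';

	export function debugLog(tag: string, ...args: unknown[]): void {
		if (!DEBUG) { return; } // shipped builds stay quiet
		console.log(`[${tag}]`, ...args);
	}

Call sites then become `debugLog('ExtHostSearch', '$provideTextSearchResults called', { handle, session, rawQuery })`, and unsetting the flag silences every trace at once.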
diff --git a/src/vs/workbench/contrib/void/browser/chatThreadService.ts b/src/vs/workbench/contrib/void/browser/chatThreadService.ts
index 30f38f10ba8..c0732d1c3c2 100644
--- a/src/vs/workbench/contrib/void/browser/chatThreadService.ts
+++ b/src/vs/workbench/contrib/void/browser/chatThreadService.ts
@@ -802,6 +802,16 @@ class ChatThreadService extends Disposable implements IChatThreadService {
 		let resMessageIsDonePromise: (res: ResTypes) => void // resolves when user approves this tool use (or if tool doesn't require approval)
 		const messageIsDonePromise = new Promise<ResTypes>((res, rej) => { resMessageIsDonePromise = res })

+		// Log: browser side - LLM chat request starting
+		console.log('🔥 [Browser] LLM Chat Request Starting =====================================')
+		console.log('🔥 [Browser] Thread ID:', threadId)
+		console.log('🔥 [Browser] Chat Mode:', chatMode)
+		console.log('🔥 [Browser] Model Selection:', modelSelection)
+		console.log('🔥 [Browser] Messages Count:', messages.length)
+		console.log('🔥 [Browser] Separate System Message Length:', separateSystemMessage?.length || 0)
+		console.log('🔥 [Browser] Messages Preview:', messages.slice(-2)) // show the last 2 messages
+		console.log('🔥 [Browser] =====================================')
+
 		const llmCancelToken = this._llmMessageService.sendLLMMessage({
 			messagesType: 'chatMessages',
 			chatMode,
@@ -812,16 +822,30 @@ class ChatThreadService extends Disposable implements IChatThreadService {
 			logging: { loggingName: `Chat - ${chatMode}`, loggingExtras: { threadId, nMessagesSent, chatMode } },
 			separateSystemMessage: separateSystemMessage,
 			onText: ({ fullText, fullReasoning, toolCall }) => {
+				// update the stream state; don't log each streamed delta
 				this._setStreamState(threadId, { isRunning: 'LLM', llmInfo: { displayContentSoFar: fullText, reasoningSoFar: fullReasoning, toolCallSoFar: toolCall ?? null }, interrupt: Promise.resolve(() => { if (llmCancelToken) this._llmMessageService.abort(llmCancelToken) }) })
 			},
 			onFinalMessage: async ({ fullText, fullReasoning, toolCall, anthropicReasoning, }) => {
+				// Log: browser side - final response
+				console.log('🔥 [Browser] LLM Response Complete =====================================')
+				console.log('🔥 [Browser] Final Text Length:', fullText.length)
+				console.log('🔥 [Browser] Final Text:', fullText)
+				console.log('🔥 [Browser] Final Reasoning Length:', fullReasoning?.length || 0)
+				console.log('🔥 [Browser] Final Tool Call:', toolCall)
+				console.log('🔥 [Browser] Anthropic Reasoning:', anthropicReasoning)
+				console.log('🔥 [Browser] =====================================')
 				resMessageIsDonePromise({ type: 'llmDone', toolCall, info: { fullText, fullReasoning, anthropicReasoning } }) // resolve with tool calls
 			},
 			onError: async (error) => {
+				// Log: browser side - error
+				console.log('🔥 [Browser] LLM Error =====================================')
+				console.error('🔥 [Browser] Error:', error)
+				console.log('🔥 [Browser] =====================================')
 				resMessageIsDonePromise({ type: 'llmError', error: error })
 			},
 			onAbort: () => {
 				// stop the loop to free up the promise, but don't modify state (already handled by whatever stopped it)
+				console.log('🔥 [Browser] LLM Request Aborted')
 				resMessageIsDonePromise({ type: 'llmAborted' })
 				this._metricsService.capture('Agent Loop Done (Aborted)', { nMessagesSent, chatMode })
 			},
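[Review note] The browser-side block above logs `fullText` in full on every completed response, while later hunks truncate with `substring(0, N) + '...'`. Factoring that pattern into a helper keeps log lines bounded and avoids the possibly-undefined `.length` comparisons fixed below. A sketch; the `preview` name is mine, not part of the patch:

	// preview.ts - bounded string preview for log lines (sketch)
	export function preview(text: string | undefined, maxChars = 500): string {
		if (!text) { return ''; }
		// append an ellipsis only when something was actually cut off
		return text.length > maxChars ? text.substring(0, maxChars) + '...' : text;
	}

e.g. `console.log('🔥 [Browser] Final Text:', preview(fullText))`.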
diff --git a/src/vs/workbench/contrib/void/browser/convertToLLMMessageService.ts b/src/vs/workbench/contrib/void/browser/convertToLLMMessageService.ts
index 94545c0d751..0ee54f90d08 100644
--- a/src/vs/workbench/contrib/void/browser/convertToLLMMessageService.ts
+++ b/src/vs/workbench/contrib/void/browser/convertToLLMMessageService.ts
@@ -594,6 +594,16 @@ class ConvertToLLMMessageService extends Disposable implements IConvertToLLMMessageService {
 		const persistentTerminalIDs = this.terminalToolService.listPersistentTerminalIds()
 		const systemMessage = chat_systemMessage({ workspaceFolders, openedURIs, directoryStr, activeURI, persistentTerminalIDs, chatMode, mcpTools, includeXMLToolDefinitions })
+
+		// Log: show the generated system message
+		console.log('📋 [System Message Generated] =====================================')
+		console.log('📋 [System Message Generated] Chat Mode:', chatMode)
+		console.log('📋 [System Message Generated] Workspace Folders:', workspaceFolders)
+		console.log('📋 [System Message Generated] Active URI:', activeURI)
+		console.log('📋 [System Message Generated] System Message Length:', systemMessage?.length || 0)
+		console.log('📋 [System Message Generated] System Message Preview:', systemMessage?.substring(0, 500) + ((systemMessage?.length ?? 0) > 500 ? '...' : ''))
+		console.log('📋 [System Message Generated] =====================================')
+
 		return systemMessage
 	}
diff --git a/src/vs/workbench/contrib/void/browser/toolsService.ts b/src/vs/workbench/contrib/void/browser/toolsService.ts
index dbd0bdd17be..f8cdc6836b5 100644
--- a/src/vs/workbench/contrib/void/browser/toolsService.ts
+++ b/src/vs/workbench/contrib/void/browser/toolsService.ts
@@ -203,6 +203,7 @@ export class ToolsService implements IToolsService {
 				is_regex: isRegexUnknown,
 				page_number: pageNumberUnknown
 			} = params
+			console.log('[toolsService] validateParams.search_for_files called', { queryUnknown, searchInFolderUnknown, isRegexUnknown, pageNumberUnknown });
 			const queryStr = validateStr('query', queryUnknown)
 			const pageNumber = validatePageNum(pageNumberUnknown)
 			const searchInFolder = validateOptionalURI(searchInFolderUnknown)
@@ -295,27 +296,37 @@ export class ToolsService implements IToolsService {
 		this.callTool = {
 			read_file: async ({ uri, startLine, endLine, pageNumber }) => {
+				console.log('[read_file] callTool entry', { uri: uri.toString(), startLine, endLine, pageNumber });
 				await voidModelService.initializeModel(uri)
+				console.log('[read_file] after initializeModel');
 				const { model } = await voidModelService.getModelSafe(uri)
-				if (model === null) { throw new Error(`No contents; File does not exist.`) }
+				console.log('[read_file] got model', { hasModel: !!model });
+				if (model === null) {
+					console.error('[read_file] No contents; File does not exist.', uri);
+					throw new Error(`No contents; File does not exist.`)
+				}

 				let contents: string
 				if (startLine === null && endLine === null) {
 					contents = model.getValue(EndOfLinePreference.LF)
+					console.log('[read_file] getValue full', { length: contents.length });
 				}
 				else {
 					const startLineNumber = startLine === null ? 1 : startLine
 					const endLineNumber = endLine === null ? model.getLineCount() : endLine
 					contents = model.getValueInRange({ startLineNumber, startColumn: 1, endLineNumber, endColumn: Number.MAX_SAFE_INTEGER }, EndOfLinePreference.LF)
+					console.log('[read_file] getValueInRange', { startLineNumber, endLineNumber, length: contents.length });
 				}

 				const totalNumLines = model.getLineCount()
+				console.log('[read_file] totalNumLines', totalNumLines);

 				const fromIdx = MAX_FILE_CHARS_PAGE * (pageNumber - 1)
 				const toIdx = MAX_FILE_CHARS_PAGE * pageNumber - 1
 				const fileContents = contents.slice(fromIdx, toIdx + 1) // paginate
 				const hasNextPage = (contents.length - 1) - toIdx >= 1
 				const totalFileLen = contents.length
+				console.log('[read_file] pagination', { fromIdx, toIdx, fileContentsLen: fileContents.length, hasNextPage, totalFileLen });
 				return { result: { fileContents, totalFileLen, hasNextPage, totalNumLines } }
 			},
@@ -349,6 +360,7 @@ export class ToolsService implements IToolsService {
 			},

 			search_for_files: async ({ query: queryStr, isRegex, searchInFolder, pageNumber }) => {
+				console.log('[toolsService] search_for_files called', { queryStr, isRegex, searchInFolder, pageNumber });
 				const searchFolders = searchInFolder === null ?
 					workspaceContextService.getWorkspace().folders.map(f => f.uri)
 					: [searchInFolder]
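[Review note] The next file dumps full request payloads with `JSON.stringify(options, null, 2)`. For chat requests that is the entire conversation, and depending on provider configuration the options object may carry headers or other auth-adjacent fields, so anything that captures these logs can leak prompt data. One defensive option is to redact known-sensitive keys before stringifying. A sketch, with an assumed (and deliberately non-exhaustive) key list:

	// redact.ts - replace likely-sensitive fields before logging a payload (sketch)
	const SENSITIVE_KEYS = new Set(['apiKey', 'api_key', 'authorization', 'headers']); // assumed list

	export function redactForLog(value: unknown): unknown {
		if (value === null || typeof value !== 'object') { return value; }
		if (Array.isArray(value)) { return value.map(redactForLog); }
		return Object.fromEntries(
			Object.entries(value as Record<string, unknown>)
				.map(([k, v]) => [k, SENSITIVE_KEYS.has(k) ? '<redacted>' : redactForLog(v)])
		);
	}

Used as `JSON.stringify(redactForLog(options), null, 2)` in the log lines below.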
diff --git a/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.impl.ts b/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.impl.ts
index b4c794e2074..36b197ffc60 100644
--- a/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.impl.ts
+++ b/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.impl.ts
@@ -310,6 +310,16 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onError,
 		// max_completion_tokens: maxTokens,
 	}
+	// Log: show the full request sent to the OpenAI-compatible API
+	console.log('🔄 [OpenAI-Compatible API Request] =====================================')
+	console.log('🔄 [OpenAI-Compatible API Request] Provider:', providerName)
+	console.log('🔄 [OpenAI-Compatible API Request] Model:', modelName)
+	console.log('🔄 [OpenAI-Compatible API Request] Options:', JSON.stringify(options, null, 2))
+	if (separateSystemMessage) {
+		console.log('🔄 [OpenAI-Compatible API Request] System Message:', separateSystemMessage)
+	}
+	console.log('🔄 [OpenAI-Compatible API Request] =====================================')
+
 	// open source models - manually parse think tokens
 	const { needsManualParse: needsManualReasoningParse, nameOfFieldInDelta: nameOfReasoningFieldInDelta } = providerReasoningIOSettings?.output ?? {}
 	const manuallyParseReasoning = needsManualReasoningParse && canIOReasoning && openSourceThinkTags
@@ -333,12 +343,21 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onError,
 	let toolId = ''
 	let toolParamsStr = ''
+	// Log: record the API call about to be made
+	console.log('🌐 [API Call Starting] OpenAI Chat Completions Create')
+	console.log('🌐 [API Call] Endpoint: chat/completions')
+	console.log('🌐 [API Call] Full Request Options:', JSON.stringify(options, null, 2))
+
 	openai.chat.completions
 		.create(options)
 		.then(async response => {
 			_setAborter(() => response.controller.abort())
+
+			console.log('🌐 [API Response] OpenAI stream started successfully')
+
 			// when receive text
 			for await (const chunk of response) {
+				// process the streamed response; don't log each chunk
 				// message
 				const newText = chunk.choices[0]?.delta?.content ?? ''
 				fullTextSoFar += newText
@@ -370,18 +389,41 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onError,
 				})
 			}
+
+			// Log: record the end of the OpenAI stream
+			console.log('🌐 [API Response] OpenAI stream completed')
+
 			// on final
 			if (!fullTextSoFar && !fullReasoningSoFar && !toolName) {
+				console.log('❌ [OpenAI Error] Empty response from model')
 				onError({ message: 'Void: Response from model was empty.', fullError: null })
 			}
 			else {
 				const toolCall = rawToolCallObjOfParamsStr(toolName, toolParamsStr, toolId)
 				const toolCallObj = toolCall ? { toolCall } : {}
+
+				// Log: record the final OpenAI response
+				console.log('✅ [OpenAI Final Message] =====================================')
+				console.log('✅ [OpenAI Final Message] Full Text:', fullTextSoFar)
+				console.log('✅ [OpenAI Final Message] Full Reasoning:', fullReasoningSoFar)
+				console.log('✅ [OpenAI Final Message] Tool Name:', toolName)
+				console.log('✅ [OpenAI Final Message] Tool Params:', toolParamsStr)
+				console.log('✅ [OpenAI Final Message] Tool Call Object:', toolCall)
+				console.log('✅ [OpenAI Final Message] =====================================')
+
 				onFinalMessage({ fullText: fullTextSoFar, fullReasoning: fullReasoningSoFar, anthropicReasoning: null, ...toolCallObj });
 			}
 		})
 		// when error/fail - this catches errors of both .create() and .then(for await)
 		.catch(error => {
+			// Log: record OpenAI errors
+			console.log('❌ [OpenAI Error] =====================================')
+			console.log('❌ [OpenAI Error] Error Object:', error)
+			console.log('❌ [OpenAI Error] Error Status:', error instanceof OpenAI.APIError ? error.status : 'N/A')
+			console.log('❌ [OpenAI Error] Error Message:', error.message || error.toString())
+			console.log('❌ [OpenAI Error] Error Stack:', error.stack)
+			console.log('❌ [OpenAI Error] =====================================')
+
 			if (error instanceof OpenAI.APIError && error.status === 401) { onError({ message: invalidApiKeyMessage(providerName), fullError: error }); }
 			else { onError({ message: error + '', fullError: error }); }
 		})
@@ -484,15 +526,28 @@ const sendAnthropicChat = async ({ messages, providerName, onText, onFinalMessage,
 		dangerouslyAllowBrowser: true
 	});
-	const stream = anthropic.messages.stream({
+	const anthropicRequestOptions = {
 		system: separateSystemMessage ?? undefined,
 		messages: messages as AnthropicLLMChatMessage[],
 		model: modelName,
 		max_tokens: maxTokens ?? 4_096, // anthropic requires this
 		...includeInPayload,
 		...nativeToolsObj,
+	}
-	})
+	// Log: show the full request sent to the Anthropic API
+	console.log('🤖 [Anthropic API Request] =====================================')
+	console.log('🤖 [Anthropic API Request] Provider:', providerName)
+	console.log('🤖 [Anthropic API Request] Model:', modelName)
+	console.log('🤖 [Anthropic API Request] Options:', JSON.stringify(anthropicRequestOptions, null, 2))
+	console.log('🤖 [Anthropic API Request] =====================================')
+
+	console.log('🌐 [API Call Starting] Anthropic Messages Stream')
+	console.log('🌐 [API Call] Endpoint: messages (streaming)')
+
+	const stream = anthropic.messages.stream(anthropicRequestOptions)
+
+	console.log('🌐 [API Response] Anthropic stream created successfully')

 	// manually parse out tool results if XML
 	if (!specialToolFormat) {
@@ -518,6 +573,8 @@ const sendAnthropicChat = async ({ messages, providerName, onText, onFinalMessage,
 	}
 	// there are no events for tool_use, it comes in at the end
 	stream.on('streamEvent', e => {
+		// Log: record every Anthropic stream event
+		console.log('🔄 [Anthropic Stream Event]', JSON.stringify(e, null, 2))
 		// start block
 		if (e.type === 'content_block_start') {
 			if (e.content_block.type === 'text') {
@@ -561,17 +618,31 @@ const sendAnthropicChat = async ({ messages, providerName, onText, onFinalMessage,
 	// on done - (or when error/fail) - this is called AFTER last streamEvent
 	stream.on('finalMessage', (response) => {
+		// Log: record the final Anthropic response
+		console.log('✅ [Anthropic Final Message] =====================================')
+		console.log('✅ [Anthropic Final Message] Raw Response:', JSON.stringify(response, null, 2))
+
 		const anthropicReasoning = response.content.filter(c => c.type === 'thinking' || c.type === 'redacted_thinking')
 		const tools = response.content.filter(c => c.type === 'tool_use')
-		// console.log('TOOLS!!!!!!', JSON.stringify(tools, null, 2))
-		// console.log('TOOLS!!!!!!', JSON.stringify(response, null, 2))
 		const toolCall = tools[0] && rawToolCallObjOfAnthropicParams(tools[0])
 		const toolCallObj = toolCall ? { toolCall } : {}
+		console.log('✅ [Anthropic Final Message] Processed - Full Text:', fullText)
+		console.log('✅ [Anthropic Final Message] Processed - Full Reasoning:', fullReasoning)
+		console.log('✅ [Anthropic Final Message] Processed - Tool Call:', toolCall)
+		console.log('✅ [Anthropic Final Message] =====================================')
+
 		onFinalMessage({ fullText, fullReasoning, anthropicReasoning, ...toolCallObj })
 	})
 	// on error
 	stream.on('error', (error) => {
+		// Log: record Anthropic errors
+		console.log('❌ [Anthropic Error] =====================================')
+		console.log('❌ [Anthropic Error] Error Object:', error)
+		console.log('❌ [Anthropic Error] Error Status:', error instanceof Anthropic.APIError ? error.status : 'N/A')
+		console.log('❌ [Anthropic Error] Error Message:', error.message || error.toString())
+		console.log('❌ [Anthropic Error] =====================================')
+
 		if (error instanceof Anthropic.APIError && error.status === 401) { onError({ message: invalidApiKeyMessage(providerName), fullError: error }) }
 		else { onError({ message: error + '', fullError: error }) }
 	})
@@ -777,8 +848,7 @@ const sendGeminiChat = async ({
 	let toolParamsStr = ''
 	let toolId = ''
-
-	genAI.models.generateContentStream({
+	const geminiRequestOptions = {
 		model: modelName,
 		config: {
 			systemInstruction: separateSystemMessage,
@@ -786,12 +856,28 @@ const sendGeminiChat = async ({
 			tools: toolConfig,
 		},
 		contents: messages as GeminiLLMChatMessage[],
-	})
+	}
+
+	// Log: show the full request sent to the Gemini API
+	console.log('🧠 [Gemini API Request] =====================================')
+	console.log('🧠 [Gemini API Request] Provider:', providerName)
+	console.log('🧠 [Gemini API Request] Model:', modelName)
+	console.log('🧠 [Gemini API Request] Options:', JSON.stringify(geminiRequestOptions, null, 2))
+	console.log('🧠 [Gemini API Request] =====================================')
+
+	console.log('🌐 [API Call Starting] Gemini Generate Content Stream')
+	console.log('🌐 [API Call] Endpoint: generateContentStream')
+
+	genAI.models.generateContentStream(geminiRequestOptions)
 		.then(async (stream) => {
 			_setAborter(() => { stream.return(fullTextSoFar); });
+			console.log('🌐 [API Response] Gemini stream created successfully')
+
 			// Process the stream
 			for await (const chunk of stream) {
+				// Log: record each Gemini stream chunk
+				console.log('🔄 [Gemini Stream Chunk]', JSON.stringify(chunk, null, 2))
 				// message
 				const newText = chunk.text ?? ''
 				fullTextSoFar += newText
@@ -815,17 +901,38 @@ const sendGeminiChat = async ({
 				})
 			}
+
+			// Log: record the end of the Gemini stream
+			console.log('🌐 [API Response] Gemini stream completed')
+
 			// on final
 			if (!fullTextSoFar && !fullReasoningSoFar && !toolName) {
+				console.log('❌ [Gemini Error] Empty response from model')
 				onError({ message: 'Void: Response from model was empty.', fullError: null })
 			}
 			else {
 				if (!toolId) toolId = generateUuid() // ids are empty, but other providers might expect an id
 				const toolCall = rawToolCallObjOfParamsStr(toolName, toolParamsStr, toolId)
 				const toolCallObj = toolCall ? { toolCall } : {}
+
+				// Log: record the final Gemini response
+				console.log('✅ [Gemini Final Message] =====================================')
+				console.log('✅ [Gemini Final Message] Full Text:', fullTextSoFar)
+				console.log('✅ [Gemini Final Message] Full Reasoning:', fullReasoningSoFar)
+				console.log('✅ [Gemini Final Message] Tool Name:', toolName)
+				console.log('✅ [Gemini Final Message] Tool Params:', toolParamsStr)
+				console.log('✅ [Gemini Final Message] Tool Call Object:', toolCall)
+				console.log('✅ [Gemini Final Message] =====================================')
+
 				onFinalMessage({ fullText: fullTextSoFar, fullReasoning: fullReasoningSoFar, anthropicReasoning: null, ...toolCallObj });
 			}
 		})
 		.catch(error => {
+			// Log: record Gemini errors
+			console.log('❌ [Gemini Error] =====================================')
+			console.log('❌ [Gemini Error] Error Object:', error)
+			console.log('❌ [Gemini Error] Error Message:', error?.message || String(error))
+			console.log('❌ [Gemini Error] Error Stack:', error?.stack)
+			console.log('❌ [Gemini Error] =====================================')
+
 			const message = error?.message
 			if (typeof message === 'string') {
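[Review note] The hunks above `JSON.stringify` every Anthropic `streamEvent` and every Gemini chunk with 2-space indentation, which puts O(events × payload size) string work on the hot streaming path. If per-event visibility is still wanted, sampling keeps the cost bounded. A sketch; the helper name and sampling rate are mine, not part of the patch:

	// sampledLogger.ts - log every Nth stream event instead of all of them (sketch)
	export function makeSampledLogger(tag: string, every = 25): (payload: unknown) => void {
		let n = 0;
		return (payload) => {
			n++;
			if (n % every !== 0) { return; }
			// stringify only when we actually emit, so skipped events cost almost nothing
			console.log(`[${tag}] event #${n}`, JSON.stringify(payload));
		};
	}

e.g. `const logEvent = makeSampledLogger('Anthropic Stream Event'); stream.on('streamEvent', e => { logEvent(e); /* ... */ })`.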
diff --git a/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.ts b/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.ts
index 27f35ad556c..9dde9bbbdc5 100644
--- a/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.ts
+++ b/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.ts
@@ -61,13 +61,31 @@ export const sendLLMMessage = async ({
 	const onText: OnText = (params) => {
 		const { fullText } = params
 		if (_didAbort) return
+		// don't log the streamed response here; just forward it to the callback
 		onText_(params)
 		_fullTextSoFar = fullText
 	}

 	const onFinalMessage: OnFinalMessage = (params) => {
-		const { fullText, fullReasoning, toolCall } = params
+		const { fullText, fullReasoning, toolCall, anthropicReasoning } = params
 		if (_didAbort) return
+
+		// Log: show the complete final response
+		console.log('✅ [LLM Final Response] =====================================')
+		console.log('✅ [LLM Final Response] Provider:', providerName, 'Model:', modelName)
+		console.log('✅ [LLM Final Response] Full Text:', fullText)
+		if (fullReasoning) {
+			console.log('✅ [LLM Final Response] Reasoning:', fullReasoning)
+		}
+		if (anthropicReasoning) {
+			console.log('✅ [LLM Final Response] Anthropic Reasoning:', anthropicReasoning)
+		}
+		if (toolCall) {
+			console.log('✅ [LLM Final Response] Tool Call:', toolCall)
+		}
+		console.log('✅ [LLM Final Response] Duration:', new Date().getTime() - submit_time.getTime(), 'ms')
+		console.log('✅ [LLM Final Response] =====================================')
+
 		captureLLMEvent(`${loggingName} - Received Full Message`, { messageLength: fullText.length, reasoningLength: fullReasoning?.length, duration: new Date().getMilliseconds() - submit_time.getMilliseconds(), toolCallName: toolCall?.name })
 		onFinalMessage_(params)
 	}
@@ -94,10 +112,21 @@ export const sendLLMMessage = async ({
 	abortRef_.current = onAbort

-	if (messagesType === 'chatMessages')
+	if (messagesType === 'chatMessages') {
 		captureLLMEvent(`${loggingName} - Sending Message`, {})
-	else if (messagesType === 'FIMMessage')
+		// Log: show the messages being sent to the model
+		console.log('🚀 [LLM Request] Provider:', providerName, 'Model:', modelName)
+		console.log('🚀 [LLM Request] System Message:', separateSystemMessage)
+		console.log('🚀 [LLM Request] Messages:', JSON.stringify(messages_, null, 2))
+		console.log('🚀 [LLM Request] Chat Mode:', chatMode)
+	}
+	else if (messagesType === 'FIMMessage') {
 		captureLLMEvent(`${loggingName} - Sending FIM`, { prefixLen: messages_?.prefix?.length, suffixLen: messages_?.suffix?.length })
+		// Log: show the FIM request
+		console.log('🚀 [FIM Request] Provider:', providerName, 'Model:', modelName)
+		console.log('🚀 [FIM Request] Prefix:', messages_?.prefix?.substring(0, 200) + ((messages_?.prefix?.length ?? 0) > 200 ? '...' : ''))
+		console.log('🚀 [FIM Request] Suffix:', messages_?.suffix?.substring(0, 200) + ((messages_?.suffix?.length ?? 0) > 200 ? '...' : ''))
+	}

 	try {
diff --git a/src/vs/workbench/services/search/common/searchExtConversionTypes.ts b/src/vs/workbench/services/search/common/searchExtConversionTypes.ts
index 5e96fef6d40..e6614d719a0 100644
--- a/src/vs/workbench/services/search/common/searchExtConversionTypes.ts
+++ b/src/vs/workbench/services/search/common/searchExtConversionTypes.ts
@@ -517,6 +517,7 @@ export class OldTextSearchProviderConverter implements TextSearchProvider2 {
 	constructor(private provider: TextSearchProvider) { }

 	provideTextSearchResults(query: TextSearchQuery2, options: TextSearchProviderOptions, progress: IProgress<TextSearchResult2>, token: CancellationToken): ProviderResult<TextSearchComplete2> {
+		console.log('[OldTextSearchProviderConverter] provideTextSearchResults called', { query, options });
 		const progressShim = (oldResult: TextSearchResult) => {
 			if (!validateProviderResult(oldResult)) {
diff --git a/src/vs/workbench/services/search/common/searchService.ts b/src/vs/workbench/services/search/common/searchService.ts
index b0981b9891c..ad9c353fbd5 100644
--- a/src/vs/workbench/services/search/common/searchService.ts
+++ b/src/vs/workbench/services/search/common/searchService.ts
@@ -80,6 +80,7 @@ export class SearchService extends Disposable implements ISearchService {
 	}

 	async textSearch(query: ITextQuery, token?: CancellationToken, onProgress?: (item: ISearchProgressItem) => void): Promise<ISearchComplete> {
+		console.log('[searchService] textSearch called', query);
 		const results = this.textSearchSplitSyncAsync(query, token, onProgress);
 		const openEditorResults = results.syncResults;
 		const otherResults = await results.asyncResults;
@@ -91,6 +92,8 @@ export class SearchService extends Disposable implements ISearchService {
 	}

 	async aiTextSearch(query: IAITextQuery, token?: CancellationToken, onProgress?: (item: ISearchProgressItem) => void): Promise<ISearchComplete> {
+		this.logService.info('SearchService#aiTextSearch called', query.contentPattern);
+
 		const onProviderProgress = (progress: ISearchProgressItem) => {
 			// Match
 			if (onProgress) { // don't override open editor results
@@ -171,6 +174,7 @@ export class SearchService extends Disposable implements ISearchService {
 	}

 	private doSearch(query: ISearchQuery, token?: CancellationToken, onProgress?: (item: ISearchProgressItem) => void): Promise<ISearchComplete> {
+		console.log('[searchService] doSearch called', JSON.stringify(query));
 		this.logService.trace('SearchService#search', JSON.stringify(query));

 		const schemesInQuery = this.getSchemesInQuery(query);
diff --git a/src/vs/workbench/services/search/common/textSearchManager.ts b/src/vs/workbench/services/search/common/textSearchManager.ts
index 59a10ed9024..cee49a89653 100644
--- a/src/vs/workbench/services/search/common/textSearchManager.ts
+++ b/src/vs/workbench/services/search/common/textSearchManager.ts
@@ -177,7 +177,9 @@ export class TextSearchManager {
 			let result;
 			if (this.queryProviderPair.query.type === QueryType.aiText) {
+				console.log('[TextSearchManager] Calling provideAITextSearchResults with query:', this.queryProviderPair.query.contentPattern);
 				result = await (this.queryProviderPair as IAITextQueryProviderPair).provider.provideAITextSearchResults(this.queryProviderPair.query.contentPattern, searchOptions, progress, token);
+				console.log('[TextSearchManager] provideAITextSearchResults returned result:', result);
 			} else {
 				result = await (this.queryProviderPair as ITextQueryProviderPair).provider.provideTextSearchResults(patternInfoToQuery(this.queryProviderPair.query.contentPattern), searchOptions, progress, token);
 			}
diff --git a/src/vs/workbench/services/search/node/ripgrepSearchProvider.ts b/src/vs/workbench/services/search/node/ripgrepSearchProvider.ts
index d9577cd7d31..82cf7fe7c08 100644
--- a/src/vs/workbench/services/search/node/ripgrepSearchProvider.ts
+++ b/src/vs/workbench/services/search/node/ripgrepSearchProvider.ts
@@ -19,6 +19,7 @@ export class RipgrepSearchProvider implements TextSearchProvider2 {
 	}

 	async provideTextSearchResults(query: TextSearchQuery2, options: TextSearchProviderOptions, progress: Progress<TextSearchResult2>, token: CancellationToken): Promise<TextSearchComplete2> {
+		console.log('[RipgrepSearchProvider] provideTextSearchResults called', { query, options });
 		const numThreads = await this.getNumThreads();
 		const engine = new RipgrepTextSearchEngine(this.outputChannel, numThreads);
diff --git a/src/vs/workbench/services/search/node/ripgrepTextSearchEngine.ts b/src/vs/workbench/services/search/node/ripgrepTextSearchEngine.ts
index 188dc598f75..f6bd3bcda5d 100644
--- a/src/vs/workbench/services/search/node/ripgrepTextSearchEngine.ts
+++ b/src/vs/workbench/services/search/node/ripgrepTextSearchEngine.ts
@@ -29,6 +29,7 @@ export class RipgrepTextSearchEngine {
 	constructor(private outputChannel: IOutputChannel, private readonly _numThreads?: number | undefined) { }

 	provideTextSearchResults(query: TextSearchQuery2, options: TextSearchProviderOptions, progress: Progress<TextSearchResult2>, token: CancellationToken): Promise<TextSearchComplete2> {
+		console.log('[RipgrepTextSearchEngine] provideTextSearchResults called', { query, options });
 		return Promise.all(options.folderOptions.map(folderOption => {
 			const extendedOptions: RipgrepTextSearchOptions = {
 				folderOptions: folderOption,
diff --git a/src/vs/workbench/services/search/worker/localFileSearch.ts b/src/vs/workbench/services/search/worker/localFileSearch.ts
index aea6f1107d1..0824ece04d8 100644
--- a/src/vs/workbench/services/search/worker/localFileSearch.ts
+++ b/src/vs/workbench/services/search/worker/localFileSearch.ts
@@ -60,19 +60,23 @@ export class LocalFileSearchWorker implements ILocalFileSearchWorker, IWebWorker
 	constructor(workerServer: IWebWorkerServer) {
 		this.host = LocalFileSearchWorkerHost.getChannel(workerServer);
+		console.log('[LocalFileSearchWorker] Worker constructed');
 	}

 	$cancelQuery(queryId: number): void {
+		console.log(`[LocalFileSearchWorker] Cancel query: ${queryId}`);
 		this.cancellationTokens.get(queryId)?.cancel();
 	}

 	private registerCancellationToken(queryId: number): CancellationTokenSource {
+		console.log(`[LocalFileSearchWorker] Register cancellation token for query: ${queryId}`);
 		const source = new CancellationTokenSource();
 		this.cancellationTokens.set(queryId, source);
 		return source;
 	}

 	async $listDirectory(handle: IWorkerFileSystemDirectoryHandle, query: IFileQueryProps<UriComponents>, folderQuery: IFolderQuery<UriComponents>, ignorePathCasing: boolean, queryId: number): Promise<IWorkerFileSearchComplete> {
+		console.log(`[LocalFileSearchWorker] $listDirectory called, queryId: ${queryId}, pattern: ${query.filePattern}`);
 		const revivedFolderQuery = reviveFolderQuery(folderQuery);
 		const extUri = new ExtUri(() => ignorePathCasing);
@@ -98,6 +102,7 @@ export class LocalFileSearchWorker implements ILocalFileSearchWorker, IWebWorker
 					limitHit = true;
 					token.cancel();
 				}
+				console.log(`[LocalFileSearchWorker] $listDirectory found file: ${file.path}`);
 				return entries.push(file.path);
 			}, token.token));
@@ -108,6 +113,7 @@ export class LocalFileSearchWorker implements ILocalFileSearchWorker, IWebWorker
 	}

 	async $searchDirectory(handle: IWorkerFileSystemDirectoryHandle, query: ITextQueryProps<UriComponents>, folderQuery: IFolderQuery<UriComponents>, ignorePathCasing: boolean, queryId: number): Promise<IWorkerTextSearchComplete> {
+		console.log(`[LocalFileSearchWorker] $searchDirectory called, queryId: ${queryId}, pattern: ${query.contentPattern?.pattern}`);
 		const revivedQuery = reviveFolderQuery(folderQuery);
 		const extUri = new ExtUri(() => ignorePathCasing);
@@ -130,6 +136,7 @@ export class LocalFileSearchWorker implements ILocalFileSearchWorker, IWebWorker
 					}
 					fileCount++;
+					console.log(`[LocalFileSearchWorker] Processing file: ${file.path}`);
 					const contents = await file.resolve();
 					if (token.token.isCancellationRequested) {
@@ -148,6 +155,7 @@ export class LocalFileSearchWorker implements ILocalFileSearchWorker, IWebWorker
 						if (query.maxResults && resultCount > query.maxResults) {
 							token.cancel();
 						}
+						console.log(`[LocalFileSearchWorker] File matched: ${file.path}, matches: ${fileResults.length}`);
 						const match = {
 							resource: URI.joinPath(revivedQuery.folder, file.path),
 							results: fileResults,
@@ -165,6 +173,7 @@ export class LocalFileSearchWorker implements ILocalFileSearchWorker, IWebWorker
 		if (PERF) { console.log('Searched in', fileCount, 'files'); }
+		console.log(`[LocalFileSearchWorker] $searchDirectory finished, total files: ${fileCount}, total matches: ${resultCount}`);
 		return {
 			results,
 			limitHit,
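[Review note] `$searchDirectory` above now logs once per processed file and once per match inside the search loop, which can mean tens of thousands of console lines on a large workspace. localFileSearch.ts already gates its timing output behind the `PERF` constant (`if (PERF) { console.log('Searched in', fileCount, 'files'); }`), and the new per-file traces could reuse the same pattern. A sketch, with `DEBUG_SEARCH` as an assumed flag named by analogy with `PERF`:

	// gate per-file traces the way the existing PERF flag gates timing output (sketch)
	const DEBUG_SEARCH = false; // assumed flag, analogous to the file's PERF const

	function traceFile(stage: string, path: string): void {
		if (!DEBUG_SEARCH) { return; }
		console.log(`[LocalFileSearchWorker] ${stage}: ${path}`);
	}

e.g. `traceFile('Processing file', file.path)` in the loop body, so the hot path stays silent unless the flag is flipped during a debugging session.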