From 9dbc802cf62440e73072a2d697d7724454605181 Mon Sep 17 00:00:00 2001 From: Manu K Date: Mon, 3 Nov 2025 17:17:36 +0530 Subject: [PATCH 1/7] feat: add Google Gemini API dependency - Install @google/genai v1.28.0 package - Add gemini option to aiServiceProvider enum - Add geminiApiKey configuration setting with link to Google AI Studio - Add geminiModel configuration setting with default gemini-2.0-flash-exp - Include documentation for popular Gemini models --- package-lock.json | 608 ++++++++++++++++++++++++++++++++++++++++++++-- package.json | 15 +- 2 files changed, 601 insertions(+), 22 deletions(-) diff --git a/package-lock.json b/package-lock.json index 0a79ec5..919f6c8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,13 +1,14 @@ { "name": "diffy-explain-ai", - "version": "1.0.17", + "version": "1.1.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "diffy-explain-ai", - "version": "1.0.17", + "version": "1.1.0", "dependencies": { + "@google/genai": "^1.28.0", "axios": "^1.12.0", "axios-retry": "^3.5.0", "openai": "^4.7.0", @@ -28,7 +29,7 @@ "webpack-cli": "^5.0.0" }, "engines": { - "vscode": "^1.90.0" + "vscode": "^1.105.0" } }, "node_modules/@babel/runtime": { @@ -211,6 +212,123 @@ "node": ">=10.0.0" } }, + "node_modules/@google/genai": { + "version": "1.28.0", + "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.28.0.tgz", + "integrity": "sha512-0pfZ1EWQsM9kINsL+mFKJvpzM6NRHS9t360S1MzKq4JtIwTj/RbsPpC/K5wpKiPy9PC+J+bsz/9gvaL51++KrA==", + "license": "Apache-2.0", + "dependencies": { + "google-auth-library": "^10.3.0", + "ws": "^8.18.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "@modelcontextprotocol/sdk": "^1.20.1" + }, + "peerDependenciesMeta": { + "@modelcontextprotocol/sdk": { + "optional": true + } + } + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": 
"sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.2", "dev": true, @@ -274,6 +392,16 @@ "version": "1.1.1", "license": "MIT" }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@tootallnate/once": { "version": "1.1.2", "dev": true, @@ -627,7 +755,6 @@ }, "node_modules/ansi-regex": { "version": "5.0.1", - "dev": true, "license": "MIT", "engines": { "node": ">=8" @@ -635,7 +762,6 @@ }, "node_modules/ansi-styles": { "version": "4.3.0", - "dev": true, "license": "MIT", "dependencies": { "color-convert": "^2.0.1" @@ -689,12 +815,31 @@ }, "node_modules/balanced-match": { "version": "1.0.2", - "dev": true, "license": "MIT" }, "node_modules/base-64": { "version": 
"0.1.0" }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/big-integer": { "version": "1.6.51", "dev": true, @@ -703,6 +848,15 @@ "node": ">=0.6" } }, + "node_modules/bignumber.js": { + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", + "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", + "license": "MIT", + "engines": { + "node": "*" + } + }, "node_modules/binary": { "version": "0.3.0", "dev": true, @@ -777,6 +931,12 @@ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" } }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", + "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", + "license": "BSD-3-Clause" + }, "node_modules/buffer-from": { "version": "1.1.2", "dev": true, @@ -936,7 +1096,6 @@ }, "node_modules/color-convert": { "version": "2.0.1", - "dev": true, "license": "MIT", "dependencies": { "color-name": "~1.1.4" @@ -947,7 +1106,6 @@ }, "node_modules/color-name": { "version": "1.1.4", - "dev": true, "license": "MIT" }, "node_modules/colorette": { @@ -981,8 +1139,9 @@ "license": "MIT" }, "node_modules/cross-spawn": { - "version": "7.0.3", - "dev": true, + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": 
"sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "license": "MIT", "dependencies": { "path-key": "^3.1.0", @@ -1000,6 +1159,15 @@ "node": "*" } }, + "node_modules/data-uri-to-buffer": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", + "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, "node_modules/debug": { "version": "4.3.4", "license": "MIT", @@ -1071,6 +1239,21 @@ "readable-stream": "^2.0.2" } }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "license": "MIT" + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", + "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, "node_modules/electron-to-chromium": { "version": "1.4.284", "dev": true, @@ -1078,7 +1261,6 @@ }, "node_modules/emoji-regex": { "version": "8.0.0", - "dev": true, "license": "MIT" }, "node_modules/enhanced-resolve": { @@ -1227,6 +1409,12 @@ "node": ">=0.8.x" } }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", + "license": "MIT" + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "dev": true, @@ -1245,6 +1433,38 @@ "node": ">= 4.9.1" } }, + "node_modules/fetch-blob": { + "version": "3.2.0", + "resolved": 
"https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", + "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "paypal", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20 || >= 14.13" + } + }, + "node_modules/fetch-blob/node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, "node_modules/fill-range": { "version": "7.0.1", "dev": true, @@ -1298,6 +1518,22 @@ } } }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/form-data": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz", @@ -1329,6 +1565,18 @@ "node": ">= 12.20" } }, + "node_modules/formdata-polyfill": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", + "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "license": "MIT", + "dependencies": { + "fetch-blob": "^3.1.2" + }, + "engines": { + "node": ">=12.20.0" + } + }, 
"node_modules/fs.realpath": { "version": "1.0.0", "dev": true, @@ -1387,6 +1635,134 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/gaxios": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.3.tgz", + "integrity": "sha512-YGGyuEdVIjqxkxVH1pUTMY/XtmmsApXrCVv5EU25iX6inEPbV+VakJfLealkBtJN69AQmh1eGOdCl9Sm1UP6XQ==", + "license": "Apache-2.0", + "dependencies": { + "extend": "^3.0.2", + "https-proxy-agent": "^7.0.1", + "node-fetch": "^3.3.2", + "rimraf": "^5.0.1" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/gaxios/node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/gaxios/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/gaxios/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/gaxios/node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": 
"https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/gaxios/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/gaxios/node_modules/node-fetch": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", + "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "license": "MIT", + "dependencies": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, + "node_modules/gaxios/node_modules/rimraf": { + "version": "5.0.10", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-5.0.10.tgz", + "integrity": "sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==", + "license": "ISC", + "dependencies": { + "glob": "^10.3.7" + }, + "bin": { + "rimraf": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/gcp-metadata": { + "version": "8.1.2", + "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-8.1.2.tgz", + "integrity": 
"sha512-zV/5HKTfCeKWnxG0Dmrw51hEWFGfcF2xiXqcA3+J90WDuP0SvoiSO5ORvcBsifmx/FoIjgQN3oNOGaQ5PhLFkg==", + "license": "Apache-2.0", + "dependencies": { + "gaxios": "^7.0.0", + "google-logging-utils": "^1.0.0", + "json-bigint": "^1.0.0" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/get-caller-file": { "version": "2.0.5", "dev": true, @@ -1474,6 +1850,33 @@ "node": ">=10" } }, + "node_modules/google-auth-library": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-10.5.0.tgz", + "integrity": "sha512-7ABviyMOlX5hIVD60YOfHw4/CxOfBhyduaYB+wbFWCWoni4N7SLcV46hrVRktuBbZjFC9ONyqamZITN7q3n32w==", + "license": "Apache-2.0", + "dependencies": { + "base64-js": "^1.3.0", + "ecdsa-sig-formatter": "^1.0.11", + "gaxios": "^7.0.0", + "gcp-metadata": "^8.0.0", + "google-logging-utils": "^1.0.0", + "gtoken": "^8.0.0", + "jws": "^4.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/google-logging-utils": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-1.1.2.tgz", + "integrity": "sha512-YsFPGVgDFf4IzSwbwIR0iaFJQFmR5Jp7V1WuYSjuRgAm9yWqsMhKE9YPlL+wvFLnc/wMiFV4SQUD9Y/JMpxIxQ==", + "license": "Apache-2.0", + "engines": { + "node": ">=14" + } + }, "node_modules/gopd": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", @@ -1491,6 +1894,19 @@ "dev": true, "license": "ISC" }, + "node_modules/gtoken": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-8.0.0.tgz", + "integrity": "sha512-+CqsMbHPiSTdtSO14O51eMNlrp9N79gmeqmXeouJOhfucAedHw9noVe/n5uJk3tbKE6a+6ZCQg3RPhVhHByAIw==", + "license": "MIT", + "dependencies": { + "gaxios": "^7.0.0", + "jws": "^4.0.0" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/has": { "version": "1.0.3", "dev": true, @@ -1665,7 +2081,6 @@ }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", - "dev": true, "license": "MIT", "engines": { 
"node": ">=8" @@ -1737,7 +2152,6 @@ }, "node_modules/isexe": { "version": "2.0.0", - "dev": true, "license": "ISC" }, "node_modules/isobject": { @@ -1748,6 +2162,21 @@ "node": ">=0.10.0" } }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, "node_modules/jest-worker": { "version": "27.5.1", "dev": true, @@ -1786,6 +2215,15 @@ "js-yaml": "bin/js-yaml.js" } }, + "node_modules/json-bigint": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", + "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", + "license": "MIT", + "dependencies": { + "bignumber.js": "^9.0.0" + } + }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", "dev": true, @@ -1796,6 +2234,27 @@ "dev": true, "license": "MIT" }, + "node_modules/jwa": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", + "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "^1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", + "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", + "license": "MIT", + "dependencies": { + "jwa": "^2.0.0", + "safe-buffer": "^5.0.1" + } + }, "node_modules/kind-of": { "version": "6.0.3", "dev": true, @@ -1928,6 
+2387,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/mkdirp": { "version": "0.5.6", "dev": true, @@ -2179,6 +2647,12 @@ "node": ">=6" } }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "license": "BlueOak-1.0.0" + }, "node_modules/path-exists": { "version": "4.0.0", "dev": true, @@ -2197,7 +2671,6 @@ }, "node_modules/path-key": { "version": "3.1.1", - "dev": true, "license": "MIT", "engines": { "node": ">=8" @@ -2208,6 +2681,28 @@ "dev": true, "license": "MIT" }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "license": "ISC" + }, "node_modules/picocolors": { "version": "1.0.0", "dev": true, @@ -2427,7 +2922,6 @@ }, "node_modules/safe-buffer": { "version": "5.2.1", - "dev": true, "funding": [ { 
"type": "github", @@ -2501,7 +2995,6 @@ }, "node_modules/shebang-command": { "version": "2.0.0", - "dev": true, "license": "MIT", "dependencies": { "shebang-regex": "^3.0.0" @@ -2512,12 +3005,23 @@ }, "node_modules/shebang-regex": { "version": "3.0.0", - "dev": true, "license": "MIT", "engines": { "node": ">=8" } }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/simple-git": { "version": "3.16.0", "license": "MIT", @@ -2563,7 +3067,21 @@ }, "node_modules/string-width": { "version": "4.2.3", - "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "license": "MIT", "dependencies": { "emoji-regex": "^8.0.0", @@ -2576,7 +3094,19 @@ }, "node_modules/strip-ansi": { "version": "6.0.1", - "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" @@ -2932,7 +3462,6 @@ }, "node_modules/which": { "version": "2.0.2", - "dev": true, "license": 
"ISC", "dependencies": { "isexe": "^2.0.0" @@ -2970,11 +3499,50 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/wrappy": { "version": "1.0.2", "dev": true, "license": "ISC" }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, "node_modules/y18n": { "version": "5.0.8", "dev": true, diff --git a/package.json b/package.json index 2d90da3..7a9f6ae 100644 --- a/package.json +++ b/package.json @@ -92,10 +92,11 @@ "type": "string", "enum": [ "openai", - "vscode-lm" + "vscode-lm", + "gemini" ], "default": "vscode-lm", - "markdownDescription": "Select the AI service provider to use for generating commit messages. \n\n- **vscode-lm**: Use VS Code's built-in Language Model API (GitHub Copilot) - Default\n- **openai**: Use OpenAI API directly (requires API key)" + "markdownDescription": "Select the AI service provider to use for generating commit messages. 
\n\n- **vscode-lm**: Use VS Code's built-in Language Model API (GitHub Copilot) - Default\n- **openai**: Use OpenAI API directly (requires API key)\n- **gemini**: Use Google Gemini API (requires API key)" }, "diffy-explain-ai.vscodeLmModel": { "type": "string", @@ -138,6 +139,15 @@ "default": "gpt-4-turbo", "markdownDescription": "OpenAI models to use for your prompts. [Documentation](https://beta.openai.com/docs/models/models). \n\n**If you face 400 Bad Request please make sure you are using the right model for your integration method.**" }, + "diffy-explain-ai.geminiApiKey": { + "type": "string", + "markdownDescription": "Enter your API key from Google AI Studio [Go to API Key Page](https://aistudio.google.com/app/apikey)" + }, + "diffy-explain-ai.geminiModel": { + "type": "string", + "default": "gemini-2.0-flash-exp", + "markdownDescription": "Google Gemini models to use for your prompts. [Documentation](https://ai.google.dev/gemini-api/docs/models/gemini). \n\nPopular models:\n- **gemini-2.0-flash-exp**: Latest experimental model (fast and capable)\n- **gemini-1.5-pro**: High-capability model for complex tasks\n- **gemini-1.5-flash**: Fast and efficient model" + }, "diffy-explain-ai.temperature": { "type": "number", "default": 0.2, @@ -238,6 +248,7 @@ "webpack-cli": "^5.0.0" }, "dependencies": { + "@google/genai": "^1.28.0", "axios": "^1.12.0", "axios-retry": "^3.5.0", "openai": "^4.7.0", From 5c6b9cca879e7b9e4820938bfeab63653e8e2bf2 Mon Sep 17 00:00:00 2001 From: Manu K Date: Mon, 3 Nov 2025 17:19:12 +0530 Subject: [PATCH 2/7] feat: implement GeminiService for Google Gemini API integration - Create GeminiService class implementing AIService interface - Add getCommitMessageFromDiff() method for generating commit messages - Add getExplainedChanges() method for explaining code changes - Implement caching support for API responses - Add comprehensive error handling with user-friendly messages - Follow same pattern as OpenAiService for consistency --- 
src/service/GeminiService.ts | 207 +++++++++++++++++++++++++++++++++++ 1 file changed, 207 insertions(+) create mode 100644 src/service/GeminiService.ts diff --git a/src/service/GeminiService.ts b/src/service/GeminiService.ts new file mode 100644 index 0000000..92f23c8 --- /dev/null +++ b/src/service/GeminiService.ts @@ -0,0 +1,207 @@ +import { GoogleGenAI } from "@google/genai"; +import type * as vscode from "vscode"; +import { window } from "vscode"; +import { clearOutput, sendToOutput } from "../utils/log"; +import { CacheService } from "./CacheService"; +import WorkspaceService from "./WorkspaceService"; + +export interface GeminiErrorResponse { + status?: boolean; + error?: Error; +} + +export interface Error { + message?: string; + type?: string; +} + +class GeminiService implements AIService { + static _instance: GeminiService; + cacheService: CacheService; + + private constructor() { + this.cacheService = CacheService.getInstance(); + } + + public static getInstance(): GeminiService { + if (!GeminiService._instance) { + GeminiService._instance = new GeminiService(); + } + return GeminiService._instance; + } + + async getCommitMessageFromDiff( + code: string, + geminiKey: string, + _nameOnly?: boolean, + progress?: vscode.Progress<{ + message?: string | undefined; + increment?: number | undefined; + }>, + ): Promise { + const instructions = WorkspaceService.getInstance().getAIInstructions(); + if (!instructions) { + return null; + } + const response = await this.getFromGemini(instructions, code, geminiKey, progress); + if (response) { + let message = String(response); + message = message.trim(); + message = message.replace(/^"/gm, ""); + message = message.replace(/"$/gm, ""); + return message; + } + return null; + } + + async getExplainedChanges(code: string, geminiKey?: string, nameOnly?: boolean) { + let gitCmd = "git diff --cached"; + if (nameOnly) { + gitCmd = "git diff --cached --name-status"; + } + const instructions = + "You are a bot explains the 
changes from the result of '" + + gitCmd + + "' that user given. commit message should be a multiple lines where first line doesn't exceeds '50' characters by following commit message guidelines based on the given git diff changes without mentioning itself"; + const response = await this.getFromGemini(instructions, code, geminiKey); + if (response) { + let message = String(response); + message = message.trim(); + message = message.replace(/^"/gm, ""); + message = message.replace(/"$/gm, ""); + return message; + } + return null; + } + + private async getFromGemini( + instructions: string, + prompt: string, + geminiKey?: string, + progress?: vscode.Progress<{ + message?: string | undefined; + increment?: number | undefined; + }>, + ) { + if (!geminiKey) { + return undefined; + } + + const geminiClient = new GoogleGenAI({ apiKey: geminiKey }); + const model = WorkspaceService.getInstance().getGeminiModel(); + const exist = this.cacheService.recordExists(model, instructions + prompt); + + if (exist) { + const result = this.cacheService.get(model, instructions + prompt) as string; + sendToOutput(`result: ${JSON.stringify(result)}`); + return result; + } + + progress?.report({ increment: 50 }); + + clearOutput(); + sendToOutput(`instructions: ${instructions}`); + sendToOutput(`git diff prompt: ${prompt}`); + sendToOutput(`model: ${model}`); + sendToOutput(`temperature: ${WorkspaceService.getInstance().getTemp()}`); + sendToOutput(`max_tokens: ${WorkspaceService.getInstance().getMaxTokens()}`); + + let response: string | undefined; + try { + const result = await geminiClient.models.generateContent({ + model: model, + contents: [ + { + role: "user", + parts: [ + { + text: `${instructions}\n\n${prompt}`, + }, + ], + }, + ], + config: { + temperature: WorkspaceService.getInstance().getTemp(), + maxOutputTokens: WorkspaceService.getInstance().getMaxTokens(), + }, + }); + + response = result.text; + sendToOutput(`result success: ${JSON.stringify(response)}`); + 
progress?.report({ increment: 49 }); + } catch (reason: unknown) { + console.error(reason); + sendToOutput(`result failed: ${JSON.stringify(reason)}`); + + const hasResponse = ( + err: unknown, + ): err is { + response?: { + statusText?: string; + status?: number; + data?: { error?: { message?: string; type?: string } }; + }; + } => { + return typeof err === "object" && err !== null && "response" in err; + }; + + if (typeof reason === "string" || reason instanceof String) { + window.showErrorMessage(`Gemini Error: ${reason} `); + return undefined; + } + + if (hasResponse(reason)) { + if (reason.response?.statusText) { + window.showErrorMessage( + `Gemini Error: ${reason.response?.data?.error?.message || reason.response.statusText} `, + ); + } else { + window.showErrorMessage("Gemini Error"); + } + + if (reason.response?.status && geminiKey) { + if (reason.response.status === 429) { + window.showInformationMessage( + "Caution: In case the API key has expired, please remove it from the extension settings.", + ); + } + } + + if (reason.response?.data?.error?.type === "invalid_request_error") { + window.showErrorMessage( + "Diffy Error: There was an issue. Server is experiencing downtime/busy. 
Please try again later.", + ); + progress?.report({ + increment: 1, + message: "\nFailed.", + }); + } else if (reason.response?.data?.error?.message) { + window.showErrorMessage(`Diffy Error: ${reason.response.data.error.message}`); + progress?.report({ + increment: 1, + message: "\nFailed.", + }); + } + } else { + window.showErrorMessage("Gemini Error"); + } + + return undefined; + } + + if (response && response !== "" && response !== "\n") { + if (response.length > 6) { + this.cacheService.set(model, instructions + prompt, response); + } + progress?.report({ + increment: 1, + message: "\nCommit message generated.", + }); + await new Promise((f) => setTimeout(f, 200)); + return response; + } + return undefined; + } +} + +export default GeminiService; From 55cb2faa886646e84255a52284583479ae5e96fd Mon Sep 17 00:00:00 2001 From: Manu K Date: Mon, 3 Nov 2025 17:19:36 +0530 Subject: [PATCH 3/7] feat: add Gemini configuration methods to WorkspaceService - Add getGeminiKey() method to retrieve and validate Gemini API key - Add getGeminiModel() method to retrieve selected Gemini model - Include error messages for missing API key with link to Google AI Studio --- src/service/WorkspaceService.ts | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/service/WorkspaceService.ts b/src/service/WorkspaceService.ts index d6d3dcd..754c0dc 100644 --- a/src/service/WorkspaceService.ts +++ b/src/service/WorkspaceService.ts @@ -104,6 +104,22 @@ export default class WorkspaceService extends EventEmitter { return value; } + getGeminiKey() { + const value = String(this.getConfiguration().get("geminiApiKey")); + if (!value) { + this.showErrorMessage( + "Your Google Gemini API Key is missing; kindly input it within the Diffy Settings section. 
You can generate a key by visiting Google AI Studio.", + ); + return null; + } + return value; + } + + getGeminiModel() { + const value = String(this.getConfiguration().get("geminiModel")); + return value || "gemini-2.0-flash-exp"; + } + getProxyUrl() { const value = this.getConfiguration().get("proxyUrl") ? String(this.getConfiguration().get("proxyUrl")) From 80e3af92c869298b8753e7d9caae00368e231948 Mon Sep 17 00:00:00 2001 From: Manu K Date: Mon, 3 Nov 2025 17:20:13 +0530 Subject: [PATCH 4/7] feat: integrate GeminiService into Diffy extension - Import and add GeminiService instance management - Add getGeminiService() method - Update getAIService() to support gemini provider - Update all methods to handle Gemini API: * explainAndPreview() * explainDiffToClipboard() * generateCommitMessageToClipboard() * generateCommitMessageToSCM() - Add Gemini API key validation - Add cleanup for Gemini service in dispose() method --- src/Diffy.ts | 77 ++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 72 insertions(+), 5 deletions(-) diff --git a/src/Diffy.ts b/src/Diffy.ts index 8766ddf..8ece97b 100644 --- a/src/Diffy.ts +++ b/src/Diffy.ts @@ -2,6 +2,7 @@ import * as vscode from "vscode"; import { type ExtensionContext, env } from "vscode"; import { EventType } from "./@types/EventType"; import BaseDiffy from "./BaseDiffy"; +import GeminiService from "./service/GeminiService"; import GitService from "./service/GitService"; import OpenAiService from "./service/OpenAiService"; import VsCodeLlmService from "./service/VsCodeLlmService"; @@ -14,6 +15,7 @@ class Diffy extends BaseDiffy { private gitService: GitService | null = null; private _openAIService: OpenAiService | null = null; private _vsCodeLlmService: VsCodeLlmService | null = null; + private _geminiService: GeminiService | null = null; private workspaceService: WorkspaceService | null = null; isEnabled = false; private _windowsService: WindowService | null = null; @@ -71,15 +73,30 @@ class Diffy extends 
BaseDiffy { return this._vsCodeLlmService; } + /** + * If the _geminiService property is not defined, then create a new instance of the GeminiService + * class and assign it to the _geminiService property. + * @returns The GeminiService object. + */ + getGeminiService(): GeminiService { + if (!this._geminiService) { + this._geminiService = GeminiService.getInstance(); + } + return this._geminiService; + } + /** * Gets the appropriate AI service based on user settings - * @returns The selected AI service (OpenAI or VS Code LLM) + * @returns The selected AI service (OpenAI, VS Code LLM, or Gemini) */ getAIService(): AIService { const provider = this.workspaceService?.getAiServiceProvider(); if (provider === "vscode-lm") { return this.getVsCodeLlmService(); } + if (provider === "gemini") { + return this.getGeminiService(); + } return this.getOpenAPIService(); } @@ -100,12 +117,17 @@ class Diffy extends BaseDiffy { const provider = this.workspaceService?.getAiServiceProvider(); - // Check if API key is required (for OpenAI) + // Check if API key is required (for OpenAI and Gemini) if (provider === "openai") { const apiKey = this.workspaceService?.getOpenAIKey(); if (!apiKey) { return; } + } else if (provider === "gemini") { + const apiKey = this.workspaceService?.getGeminiKey(); + if (!apiKey) { + return; + } } /* Getting the current repo. 
*/ @@ -137,6 +159,12 @@ class Diffy extends BaseDiffy { return; } changes = await (aiService as OpenAiService).getExplainedChanges(diff, apiKey, nameOnly); + } else if (provider === "gemini") { + const apiKey = this.workspaceService?.getGeminiKey(); + if (!apiKey) { + return; + } + changes = await (aiService as GeminiService).getExplainedChanges(diff, apiKey, nameOnly); } else { // VS Code LLM - try with fallback to OpenAI if it fails try { @@ -186,12 +214,17 @@ class Diffy extends BaseDiffy { const provider = this.workspaceService?.getAiServiceProvider(); - // Check if API key is required (for OpenAI) + // Check if API key is required (for OpenAI and Gemini) if (provider === "openai") { const apiKey = this.workspaceService?.getOpenAIKey(); if (!apiKey) { return; } + } else if (provider === "gemini") { + const apiKey = this.workspaceService?.getGeminiKey(); + if (!apiKey) { + return; + } } /* Getting the current repo. */ @@ -223,6 +256,12 @@ class Diffy extends BaseDiffy { return; } changes = await (aiService as OpenAiService).getExplainedChanges(diff, apiKey, nameOnly); + } else if (provider === "gemini") { + const apiKey = this.workspaceService?.getGeminiKey(); + if (!apiKey) { + return; + } + changes = await (aiService as GeminiService).getExplainedChanges(diff, apiKey, nameOnly); } else { // VS Code LLM - try with fallback to OpenAI if it fails try { @@ -273,12 +312,17 @@ class Diffy extends BaseDiffy { const provider = this.workspaceService?.getAiServiceProvider(); - // Check if API key is required (for OpenAI) + // Check if API key is required (for OpenAI and Gemini) if (provider === "openai") { const apiKey = this.workspaceService?.getOpenAIKey(); if (!apiKey) { return; } + } else if (provider === "gemini") { + const apiKey = this.workspaceService?.getGeminiKey(); + if (!apiKey) { + return; + } } /* Getting the current repo. 
*/ @@ -309,6 +353,12 @@ class Diffy extends BaseDiffy { return; } changes = await this.getOpenAPIService().getCommitMessageFromDiff(diff, apiKey, nameOnly); + } else if (provider === "gemini") { + const apiKey = this.workspaceService?.getGeminiKey(); + if (!apiKey) { + return; + } + changes = await this.getGeminiService().getCommitMessageFromDiff(diff, apiKey, nameOnly); } else { // VS Code LLM - try with fallback to OpenAI if it fails try { @@ -368,12 +418,17 @@ class Diffy extends BaseDiffy { const provider = this.workspaceService?.getAiServiceProvider(); - // Check if API key is required (for OpenAI) + // Check if API key is required (for OpenAI and Gemini) if (provider === "openai") { const apiKey = this.workspaceService?.getOpenAIKey(); if (!apiKey) { return; } + } else if (provider === "gemini") { + const apiKey = this.workspaceService?.getGeminiKey(); + if (!apiKey) { + return; + } } /* Getting the current repo. */ @@ -409,6 +464,17 @@ class Diffy extends BaseDiffy { nameOnly, progress, ); + } else if (provider === "gemini") { + const apiKey = this.workspaceService?.getGeminiKey(); + if (!apiKey) { + return; + } + changes = await this.getGeminiService().getCommitMessageFromDiff( + diff, + apiKey, + nameOnly, + progress, + ); } else { // VS Code LLM - try with fallback to OpenAI if it fails try { @@ -461,6 +527,7 @@ class Diffy extends BaseDiffy { this.gitService = null; this._openAIService = null; this._vsCodeLlmService = null; + this._geminiService = null; this.workspaceService = null; } } From 4233b382fcf8777d9f8b6412411dbb77198742ea Mon Sep 17 00:00:00 2001 From: Manu K Date: Mon, 3 Nov 2025 23:22:25 +0530 Subject: [PATCH 5/7] build(deps): update Node.js types and add undici types --- package-lock.json | 38 +++- package.json | 515 +++++++++++++++++++++++----------------------- tsconfig.json | 2 +- 3 files changed, 293 insertions(+), 262 deletions(-) diff --git a/package-lock.json b/package-lock.json index 919f6c8..27ff47e 100644 --- a/package-lock.json 
+++ b/package-lock.json @@ -18,13 +18,14 @@ "@biomejs/biome": "2.3.2", "@types/glob": "^8.0.0", "@types/mocha": "^10.0.1", - "@types/node": "16.x", + "@types/node": "^22.19.0", "@types/vscode": "^1.105.0", "@vscode/test-electron": "^2.2.0", "glob": "^8.0.3", "mocha": "^10.1.0", "ts-loader": "^9.4.1", "typescript": "^4.9.3", + "undici-types": "^7.16.0", "webpack": "^5.76.0", "webpack-cli": "^5.0.0" }, @@ -458,8 +459,13 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "16.18.10", - "license": "MIT" + "version": "22.19.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.0.tgz", + "integrity": "sha512-xpr/lmLPQEj+TUnHmR+Ab91/glhJvsqcjB+yY0Ix9GO70H6Lb4FHH5GeqdOE5btAx7eIMwuHkp4H2MSkLcqWbA==", + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } }, "node_modules/@types/node-fetch": { "version": "2.6.5", @@ -469,6 +475,12 @@ "form-data": "^4.0.0" } }, + "node_modules/@types/node/node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "license": "MIT" + }, "node_modules/@types/vscode": { "version": "1.105.0", "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.105.0.tgz", @@ -2608,7 +2620,18 @@ } }, "node_modules/openai/node_modules/@types/node": { - "version": "18.17.16", + "version": "18.19.130", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz", + "integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==", + "license": "MIT", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/openai/node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": 
"sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", "license": "MIT" }, "node_modules/p-limit": { @@ -3256,6 +3279,13 @@ "node": ">=4.2.0" } }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "dev": true, + "license": "MIT" + }, "node_modules/unzipper": { "version": "0.10.11", "dev": true, diff --git a/package.json b/package.json index 7a9f6ae..35096a2 100644 --- a/package.json +++ b/package.json @@ -1,257 +1,258 @@ -{ - "name": "diffy-explain-ai", - "displayName": "Diffy Commit AI - Generate Your Commit Message", - "description": "Generate Commit Message for You or Explains The Changed Code Using Git Diff And OpenAi In Natural Language", - "version": "1.1.0", - "publisher": "hitclaw", - "engines": { - "vscode": "^1.105.0" - }, - "categories": [ - "SCM Providers" - ], - "repository": { - "url": "https://github.com/Hi7cl4w/diffy-explain-ai.git" - }, - "bugs": { - "url": "hhttps://github.com/Hi7cl4w/diffy-explain-ai/issues" - }, - "keywords": [ - "git", - "generate", - "message", - "commit", - "openai", - "vs-code", - "vscode", - "productivity" - ], - "icon": "icons/icon.png", - "main": "./dist/extension.js", - "contributes": { - "menus": { - "scm/title": [ - { - "command": "diffy-explain-ai.generateCommitMessage", - "when": "scmProvider == git", - "group": "navigation", - "title": "Generate Commit Message" - }, - { - "command": "diffy-explain-ai.generateCommitMessage", - "when": "scmProvider == git", - "group": "inline", - "title": "Generate Commit Message" - }, - { - "command": "diffy-explain-ai.generateCommitMessage", - "when": "scmProvider == git", - "group": "inline", - "title": "Generate Commit Message" - } - ] - }, - "commands": [ - { - "command": "diffy-explain-ai.explainDiffClipboard", - "title": "DIFFY: 
Explain Changes and Copy to Clipboard", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - }, - { - "command": "diffy-explain-ai.generateCommitMessage", - "title": "DIFFY: Generate Commit Message", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - }, - { - "command": "diffy-explain-ai.generateCommitMessageClipboard", - "title": "DIFFY: Generate Commit Message and Copy to Clipboard", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - }, - { - "command": "diffy-explain-ai.explainAndPreview", - "title": "DIFFY: Explain and Preview", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - } - ], - "configuration": { - "type": "object", - "properties": { - "diffy-explain-ai.aiServiceProvider": { - "type": "string", - "enum": [ - "openai", - "vscode-lm", - "gemini" - ], - "default": "vscode-lm", - "markdownDescription": "Select the AI service provider to use for generating commit messages. 
\n\n- **vscode-lm**: Use VS Code's built-in Language Model API (GitHub Copilot) - Default\n- **openai**: Use OpenAI API directly (requires API key)\n- **gemini**: Use Google Gemini API (requires API key)" - }, - "diffy-explain-ai.vscodeLmModel": { - "type": "string", - "enum": [ - "auto", - "copilot-gpt-4o", - "copilot-gpt-4", - "copilot-gpt-4-turbo", - "copilot-gpt-3.5-turbo", - "copilot-gpt-3.5", - "copilot-o1", - "copilot-o1-mini", - "copilot-o1-preview" - ], - "enumDescriptions": [ - "Automatically selects the best available model (recommended)", - "GitHub Copilot GPT-4o model - Latest and most capable", - "GitHub Copilot GPT-4 model - High capability model", - "GitHub Copilot GPT-4 Turbo model - Fast and capable", - "GitHub Copilot GPT-3.5 Turbo model - Fast and efficient", - "GitHub Copilot GPT-3.5 model - Standard capability", - "GitHub Copilot o1 model - Advanced reasoning model", - "GitHub Copilot o1-mini model - Compact reasoning model", - "GitHub Copilot o1-preview model - Preview of o1 capabilities" - ], - "default": "auto", - "markdownDescription": "Select the VS Code Language Model to use when `aiServiceProvider` is set to `vscode-lm`. Requires GitHub Copilot subscription. \n\nAvailable models may vary based on your Copilot subscription tier:\n\n- **auto**: Automatically selects the best available model\n- **copilot-gpt-4o**: Latest GPT-4o model (recommended for most users)\n- **copilot-gpt-4**: GPT-4 model with high reasoning capabilities\n- **copilot-gpt-4-turbo**: Fast GPT-4 Turbo model\n- **copilot-gpt-3.5-turbo**: Fast and efficient GPT-3.5 Turbo\n- **copilot-gpt-3.5**: Standard GPT-3.5 model\n- **copilot-o1**: Advanced o1 reasoning model (may require higher tier)\n- **copilot-o1-mini**: Compact o1-mini model\n- **copilot-o1-preview**: Preview access to o1 capabilities" - }, - "diffy-explain-ai.proxyUrl": { - "type": "string", - "default": "", - "description": "Provide a user-defined URL for the OpenAI API proxy server. 
Please ensure the server's availability is confirmed. The default value is: https://api.openai.com/v1." - }, - "diffy-explain-ai.openAiKey": { - "type": "string", - "markdownDescription": "Enter your api key from openai.com [Go to API Key Page](https://beta.openai.com/account/api-keys)" - }, - "diffy-explain-ai.model": { - "type": "string", - "default": "gpt-4-turbo", - "markdownDescription": "OpenAI models to use for your prompts. [Documentation](https://beta.openai.com/docs/models/models). \n\n**If you face 400 Bad Request please make sure you are using the right model for your integration method.**" - }, - "diffy-explain-ai.geminiApiKey": { - "type": "string", - "markdownDescription": "Enter your API key from Google AI Studio [Go to API Key Page](https://aistudio.google.com/app/apikey)" - }, - "diffy-explain-ai.geminiModel": { - "type": "string", - "default": "gemini-2.0-flash-exp", - "markdownDescription": "Google Gemini models to use for your prompts. [Documentation](https://ai.google.dev/gemini-api/docs/models/gemini). \n\nPopular models:\n- **gemini-2.0-flash-exp**: Latest experimental model (fast and capable)\n- **gemini-1.5-pro**: High-capability model for complex tasks\n- **gemini-1.5-flash**: Fast and efficient model" - }, - "diffy-explain-ai.temperature": { - "type": "number", - "default": 0.2, - "description": "Temperature is a parameter used in OpenAI's language models to control the level of randomness and creativity in generated text, with higher values making the output more random and lower values making it more deterministic." - }, - "diffy-explain-ai.maxTokens": { - "type": "number", - "default": 196, - "description": "Parameter in OpenAI's language models that limits the maximum length of the generated text output to a specified number of tokens, helping control the length of the response." 
- }, - "diffy-explain-ai.aiInstructions": { - "type": "string", - "default": "Analyze the provided git diff --staged output, categorize the changes into a conventional commit type (e.g., feat, fix, docs, chore,style), determine if a scope is applicable, and then synthesize a concise commit message that follows the format [optional scope]: [optional body] [optional footer(s)]" - }, - "diffy-explain-ai.commitMessageType": { - "type": "string", - "enum": [ - "conventional", - "gitmoji", - "custom" - ], - "enumDescriptions": [ - "Conventional Commits format: [optional scope]: ", - "Gitmoji format with emoji prefixes: 🎨 :: ", - "Custom user-defined format using a template with placeholders" - ], - "default": "conventional", - "markdownDescription": "Choose the commit message format style:\n\n- **conventional**: Standard Conventional Commits format (e.g., `feat: add new feature`)\n- **gitmoji**: Commits with emoji prefixes (e.g., `✨ feat: add new feature`)\n- **custom**: Fully customizable template with placeholders (configure via Custom Commit Prompt setting)" - }, - "diffy-explain-ai.customCommitPrompt": { - "type": "string", - "default": "Generate a commit message for the following git diff.\n\nRequirements:\n- Maximum subject length: {maxLength} characters\n- Use imperative mood\n- Be concise and clear{bodyInstructions}\n\nReturn ONLY the commit message, no explanations.", - "markdownDescription": "Custom commit message generation prompt when **Commit Message Type** is set to `custom`. 
\n\nSupported placeholders:\n- `{maxLength}` - Maximum subject line length\n- `{bodyInstructions}` - Auto-filled based on Include Commit Body setting\n- `{locale}` - Language/locale for the message\n- `{diff}` - The actual git diff (automatically appended)\n\nExample template:\n```\nGenerate a {locale} commit message following our team convention:\n- Start with JIRA ticket: [PROJ-XXX]\n- Subject max {maxLength} chars\n- Include impact analysis{bodyInstructions}\n```" - }, - "diffy-explain-ai.includeCommitBody": { - "type": "boolean", - "default": false, - "markdownDescription": "Include a detailed body section in commit messages with bullet points explaining the changes. When enabled, commit messages will have:\n\n```\nfeat: add user authentication\n\n- Implement JWT token generation\n- Add login and registration endpoints\n- Create user model and database schema\n```" - }, - "diffy-explain-ai.excludeFilesFromDiff": { - "type": "array", - "items": { - "type": "string" - }, - "default": [ - "package-lock.json", - "yarn.lock", - "pnpm-lock.yaml", - "*.jpg", - "*.png", - "*.gif", - "*.svg", - "*.ico", - "*.woff", - "*.woff2", - "*.ttf", - "*.eot" - ], - "markdownDescription": "File patterns to exclude from AI analysis when generating commit messages. This helps reduce token usage and improve quality by filtering out:\n\n- Lock files (package-lock.json, yarn.lock)\n- Large binary assets (images, fonts)\n- Generated files\n\nSupports glob patterns like `*.log`, `dist/**`, `**/*.min.js`" - }, - "diffy-explain-ai.maxCommitMessageLength": { - "type": "number", - "default": 72, - "minimum": 50, - "maximum": 200, - "markdownDescription": "Maximum character length for commit message subject line. 
Following best practices:\n\n- **50**: Strict GitHub standard\n- **72**: Common standard (default)\n- **100**: Relaxed limit" - } - } - } - }, - "scripts": { - "vscode:prepublish": "npm run package", - "compile": "webpack", - "watch": "webpack --watch", - "package": "webpack --mode production --devtool hidden-source-map", - "compile-tests": "tsc -p . --outDir out", - "watch-tests": "tsc -p . -w --outDir out", - "pretest": "pnpm run compile-tests && pnpm run compile && pnpm run lint", - "type-check": "tsc --noEmit", - "lint": "biome check --write .", - "lint:check": "biome check .", - "format": "biome format --write .", - "format:check": "biome format .", - "test": "node ./out/test/runTest.js" - }, - "devDependencies": { - "@biomejs/biome": "2.3.2", - "@types/glob": "^8.0.0", - "@types/mocha": "^10.0.1", - "@types/node": "16.x", - "@types/vscode": "^1.105.0", - "@vscode/test-electron": "^2.2.0", - "glob": "^8.0.3", - "mocha": "^10.1.0", - "ts-loader": "^9.4.1", - "typescript": "^4.9.3", - "webpack": "^5.76.0", - "webpack-cli": "^5.0.0" - }, - "dependencies": { - "@google/genai": "^1.28.0", - "axios": "^1.12.0", - "axios-retry": "^3.5.0", - "openai": "^4.7.0", - "simple-git": "^3.16.0" - } -} +{ + "name": "diffy-explain-ai", + "displayName": "Diffy Commit AI - Generate Your Commit Message", + "description": "Generate Commit Message for You or Explains The Changed Code Using Git Diff And OpenAi In Natural Language", + "version": "1.1.0", + "publisher": "hitclaw", + "engines": { + "vscode": "^1.105.0" + }, + "categories": [ + "SCM Providers" + ], + "repository": { + "url": "https://github.com/Hi7cl4w/diffy-explain-ai.git" + }, + "bugs": { + "url": "hhttps://github.com/Hi7cl4w/diffy-explain-ai/issues" + }, + "keywords": [ + "git", + "generate", + "message", + "commit", + "openai", + "vs-code", + "vscode", + "productivity" + ], + "icon": "icons/icon.png", + "main": "./dist/extension.js", + "contributes": { + "menus": { + "scm/title": [ + { + "command": 
"diffy-explain-ai.generateCommitMessage", + "when": "scmProvider == git", + "group": "navigation", + "title": "Generate Commit Message" + }, + { + "command": "diffy-explain-ai.generateCommitMessage", + "when": "scmProvider == git", + "group": "inline", + "title": "Generate Commit Message" + }, + { + "command": "diffy-explain-ai.generateCommitMessage", + "when": "scmProvider == git", + "group": "inline", + "title": "Generate Commit Message" + } + ] + }, + "commands": [ + { + "command": "diffy-explain-ai.explainDiffClipboard", + "title": "DIFFY: Explain Changes and Copy to Clipboard", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + }, + { + "command": "diffy-explain-ai.generateCommitMessage", + "title": "DIFFY: Generate Commit Message", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + }, + { + "command": "diffy-explain-ai.generateCommitMessageClipboard", + "title": "DIFFY: Generate Commit Message and Copy to Clipboard", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + }, + { + "command": "diffy-explain-ai.explainAndPreview", + "title": "DIFFY: Explain and Preview", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + } + ], + "configuration": { + "type": "object", + "properties": { + "diffy-explain-ai.aiServiceProvider": { + "type": "string", + "enum": [ + "openai", + "vscode-lm", + "gemini" + ], + "default": "vscode-lm", + "markdownDescription": "Select the AI service provider to use for generating commit messages. 
\n\n- **vscode-lm**: Use VS Code's built-in Language Model API (GitHub Copilot) - Default\n- **openai**: Use OpenAI API directly (requires API key)\n- **gemini**: Use Google Gemini API (requires API key)" + }, + "diffy-explain-ai.vscodeLmModel": { + "type": "string", + "enum": [ + "auto", + "copilot-gpt-4o", + "copilot-gpt-4", + "copilot-gpt-4-turbo", + "copilot-gpt-3.5-turbo", + "copilot-gpt-3.5", + "copilot-o1", + "copilot-o1-mini", + "copilot-o1-preview" + ], + "enumDescriptions": [ + "Automatically selects the best available model (recommended)", + "GitHub Copilot GPT-4o model - Latest and most capable", + "GitHub Copilot GPT-4 model - High capability model", + "GitHub Copilot GPT-4 Turbo model - Fast and capable", + "GitHub Copilot GPT-3.5 Turbo model - Fast and efficient", + "GitHub Copilot GPT-3.5 model - Standard capability", + "GitHub Copilot o1 model - Advanced reasoning model", + "GitHub Copilot o1-mini model - Compact reasoning model", + "GitHub Copilot o1-preview model - Preview of o1 capabilities" + ], + "default": "auto", + "markdownDescription": "Select the VS Code Language Model to use when `aiServiceProvider` is set to `vscode-lm`. Requires GitHub Copilot subscription. \n\nAvailable models may vary based on your Copilot subscription tier:\n\n- **auto**: Automatically selects the best available model\n- **copilot-gpt-4o**: Latest GPT-4o model (recommended for most users)\n- **copilot-gpt-4**: GPT-4 model with high reasoning capabilities\n- **copilot-gpt-4-turbo**: Fast GPT-4 Turbo model\n- **copilot-gpt-3.5-turbo**: Fast and efficient GPT-3.5 Turbo\n- **copilot-gpt-3.5**: Standard GPT-3.5 model\n- **copilot-o1**: Advanced o1 reasoning model (may require higher tier)\n- **copilot-o1-mini**: Compact o1-mini model\n- **copilot-o1-preview**: Preview access to o1 capabilities" + }, + "diffy-explain-ai.proxyUrl": { + "type": "string", + "default": "", + "description": "Provide a user-defined URL for the OpenAI API proxy server. 
Please ensure the server's availability is confirmed. The default value is: https://api.openai.com/v1." + }, + "diffy-explain-ai.openAiKey": { + "type": "string", + "markdownDescription": "Enter your api key from openai.com [Go to API Key Page](https://beta.openai.com/account/api-keys)" + }, + "diffy-explain-ai.model": { + "type": "string", + "default": "gpt-4-turbo", + "markdownDescription": "OpenAI models to use for your prompts. [Documentation](https://beta.openai.com/docs/models/models). \n\n**If you face 400 Bad Request please make sure you are using the right model for your integration method.**" + }, + "diffy-explain-ai.geminiApiKey": { + "type": "string", + "markdownDescription": "Enter your API key from Google AI Studio [Go to API Key Page](https://aistudio.google.com/app/apikey)" + }, + "diffy-explain-ai.geminiModel": { + "type": "string", + "default": "gemini-2.0-flash-exp", + "markdownDescription": "Google Gemini models to use for your prompts. [Documentation](https://ai.google.dev/gemini-api/docs/models/gemini). \n\nPopular models:\n- **gemini-2.0-flash-exp**: Latest experimental model (fast and capable)\n- **gemini-1.5-pro**: High-capability model for complex tasks\n- **gemini-1.5-flash**: Fast and efficient model" + }, + "diffy-explain-ai.temperature": { + "type": "number", + "default": 0.2, + "description": "Temperature is a parameter used in OpenAI's language models to control the level of randomness and creativity in generated text, with higher values making the output more random and lower values making it more deterministic." + }, + "diffy-explain-ai.maxTokens": { + "type": "number", + "default": 196, + "description": "Parameter in OpenAI's language models that limits the maximum length of the generated text output to a specified number of tokens, helping control the length of the response." 
+ }, + "diffy-explain-ai.aiInstructions": { + "type": "string", + "default": "Analyze the provided git diff --staged output, categorize the changes into a conventional commit type (e.g., feat, fix, docs, chore,style), determine if a scope is applicable, and then synthesize a concise commit message that follows the format [optional scope]: [optional body] [optional footer(s)]" + }, + "diffy-explain-ai.commitMessageType": { + "type": "string", + "enum": [ + "conventional", + "gitmoji", + "custom" + ], + "enumDescriptions": [ + "Conventional Commits format: [optional scope]: ", + "Gitmoji format with emoji prefixes: 🎨 :: ", + "Custom user-defined format using a template with placeholders" + ], + "default": "conventional", + "markdownDescription": "Choose the commit message format style:\n\n- **conventional**: Standard Conventional Commits format (e.g., `feat: add new feature`)\n- **gitmoji**: Commits with emoji prefixes (e.g., `✨ feat: add new feature`)\n- **custom**: Fully customizable template with placeholders (configure via Custom Commit Prompt setting)" + }, + "diffy-explain-ai.customCommitPrompt": { + "type": "string", + "default": "Generate a commit message for the following git diff.\n\nRequirements:\n- Maximum subject length: {maxLength} characters\n- Use imperative mood\n- Be concise and clear{bodyInstructions}\n\nReturn ONLY the commit message, no explanations.", + "markdownDescription": "Custom commit message generation prompt when **Commit Message Type** is set to `custom`. 
\n\nSupported placeholders:\n- `{maxLength}` - Maximum subject line length\n- `{bodyInstructions}` - Auto-filled based on Include Commit Body setting\n- `{locale}` - Language/locale for the message\n- `{diff}` - The actual git diff (automatically appended)\n\nExample template:\n```\nGenerate a {locale} commit message following our team convention:\n- Start with JIRA ticket: [PROJ-XXX]\n- Subject max {maxLength} chars\n- Include impact analysis{bodyInstructions}\n```" + }, + "diffy-explain-ai.includeCommitBody": { + "type": "boolean", + "default": false, + "markdownDescription": "Include a detailed body section in commit messages with bullet points explaining the changes. When enabled, commit messages will have:\n\n```\nfeat: add user authentication\n\n- Implement JWT token generation\n- Add login and registration endpoints\n- Create user model and database schema\n```" + }, + "diffy-explain-ai.excludeFilesFromDiff": { + "type": "array", + "items": { + "type": "string" + }, + "default": [ + "package-lock.json", + "yarn.lock", + "pnpm-lock.yaml", + "*.jpg", + "*.png", + "*.gif", + "*.svg", + "*.ico", + "*.woff", + "*.woff2", + "*.ttf", + "*.eot" + ], + "markdownDescription": "File patterns to exclude from AI analysis when generating commit messages. This helps reduce token usage and improve quality by filtering out:\n\n- Lock files (package-lock.json, yarn.lock)\n- Large binary assets (images, fonts)\n- Generated files\n\nSupports glob patterns like `*.log`, `dist/**`, `**/*.min.js`" + }, + "diffy-explain-ai.maxCommitMessageLength": { + "type": "number", + "default": 72, + "minimum": 50, + "maximum": 200, + "markdownDescription": "Maximum character length for commit message subject line. 
Following best practices:\n\n- **50**: Strict GitHub standard\n- **72**: Common standard (default)\n- **100**: Relaxed limit" + } + } + } + }, + "scripts": { + "vscode:prepublish": "npm run package", + "compile": "webpack", + "watch": "webpack --watch", + "package": "webpack --mode production --devtool hidden-source-map", + "compile-tests": "tsc -p . --outDir out", + "watch-tests": "tsc -p . -w --outDir out", + "pretest": "pnpm run compile-tests && pnpm run compile && pnpm run lint", + "type-check": "tsc --noEmit", + "lint": "biome check --write .", + "lint:check": "biome check .", + "format": "biome format --write .", + "format:check": "biome format .", + "test": "node ./out/test/runTest.js" + }, + "devDependencies": { + "@biomejs/biome": "2.3.2", + "@types/glob": "^8.0.0", + "@types/mocha": "^10.0.1", + "@types/node": "^22.19.0", + "@types/vscode": "^1.105.0", + "@vscode/test-electron": "^2.2.0", + "glob": "^8.0.3", + "mocha": "^10.1.0", + "ts-loader": "^9.4.1", + "typescript": "^4.9.3", + "undici-types": "^7.16.0", + "webpack": "^5.76.0", + "webpack-cli": "^5.0.0" + }, + "dependencies": { + "@google/genai": "^1.28.0", + "axios": "^1.12.0", + "axios-retry": "^3.5.0", + "openai": "^4.7.0", + "simple-git": "^3.16.0" + } +} diff --git a/tsconfig.json b/tsconfig.json index d2d3666..34ceb49 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -2,7 +2,7 @@ "compilerOptions": { "module": "commonjs", "target": "ES2020", - "lib": ["ES2020"], + "lib": ["ES2020", "DOM"], "sourceMap": true, "rootDir": "src", "strict": true /* enable all strict type-checking options */, From 2ab275fc0249324071d60ddf1410a14340d1b2c4 Mon Sep 17 00:00:00 2001 From: Manu K Date: Mon, 3 Nov 2025 23:24:54 +0530 Subject: [PATCH 6/7] ci(workflows): update supported Node.js versions --- .github/workflows/main.yml | 2 +- .github/workflows/release.yml | 2 +- package.json | 516 +++++++++++++++++----------------- 3 files changed, 260 insertions(+), 260 deletions(-) diff --git a/.github/workflows/main.yml 
b/.github/workflows/main.yml index 674d701..49879d8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -15,7 +15,7 @@ jobs: strategy: matrix: - node-version: [16.x, 18.x] + node-version: [18.x, 20.x, 22.x] # See supported Node.js release schedule at https://nodejs.org/en/about/releases/ steps: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ca40f00..aaa8b9f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -32,7 +32,7 @@ jobs: - name: Setup Node.js uses: actions/setup-node@v4 with: - node-version: "18.x" + node-version: "22.x" cache: "npm" - name: Install dependencies diff --git a/package.json b/package.json index 35096a2..5420f2c 100644 --- a/package.json +++ b/package.json @@ -1,258 +1,258 @@ -{ - "name": "diffy-explain-ai", - "displayName": "Diffy Commit AI - Generate Your Commit Message", - "description": "Generate Commit Message for You or Explains The Changed Code Using Git Diff And OpenAi In Natural Language", - "version": "1.1.0", - "publisher": "hitclaw", - "engines": { - "vscode": "^1.105.0" - }, - "categories": [ - "SCM Providers" - ], - "repository": { - "url": "https://github.com/Hi7cl4w/diffy-explain-ai.git" - }, - "bugs": { - "url": "hhttps://github.com/Hi7cl4w/diffy-explain-ai/issues" - }, - "keywords": [ - "git", - "generate", - "message", - "commit", - "openai", - "vs-code", - "vscode", - "productivity" - ], - "icon": "icons/icon.png", - "main": "./dist/extension.js", - "contributes": { - "menus": { - "scm/title": [ - { - "command": "diffy-explain-ai.generateCommitMessage", - "when": "scmProvider == git", - "group": "navigation", - "title": "Generate Commit Message" - }, - { - "command": "diffy-explain-ai.generateCommitMessage", - "when": "scmProvider == git", - "group": "inline", - "title": "Generate Commit Message" - }, - { - "command": "diffy-explain-ai.generateCommitMessage", - "when": "scmProvider == git", - "group": "inline", - "title": "Generate Commit 
Message" - } - ] - }, - "commands": [ - { - "command": "diffy-explain-ai.explainDiffClipboard", - "title": "DIFFY: Explain Changes and Copy to Clipboard", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - }, - { - "command": "diffy-explain-ai.generateCommitMessage", - "title": "DIFFY: Generate Commit Message", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - }, - { - "command": "diffy-explain-ai.generateCommitMessageClipboard", - "title": "DIFFY: Generate Commit Message and Copy to Clipboard", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - }, - { - "command": "diffy-explain-ai.explainAndPreview", - "title": "DIFFY: Explain and Preview", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - } - ], - "configuration": { - "type": "object", - "properties": { - "diffy-explain-ai.aiServiceProvider": { - "type": "string", - "enum": [ - "openai", - "vscode-lm", - "gemini" - ], - "default": "vscode-lm", - "markdownDescription": "Select the AI service provider to use for generating commit messages. 
\n\n- **vscode-lm**: Use VS Code's built-in Language Model API (GitHub Copilot) - Default\n- **openai**: Use OpenAI API directly (requires API key)\n- **gemini**: Use Google Gemini API (requires API key)" - }, - "diffy-explain-ai.vscodeLmModel": { - "type": "string", - "enum": [ - "auto", - "copilot-gpt-4o", - "copilot-gpt-4", - "copilot-gpt-4-turbo", - "copilot-gpt-3.5-turbo", - "copilot-gpt-3.5", - "copilot-o1", - "copilot-o1-mini", - "copilot-o1-preview" - ], - "enumDescriptions": [ - "Automatically selects the best available model (recommended)", - "GitHub Copilot GPT-4o model - Latest and most capable", - "GitHub Copilot GPT-4 model - High capability model", - "GitHub Copilot GPT-4 Turbo model - Fast and capable", - "GitHub Copilot GPT-3.5 Turbo model - Fast and efficient", - "GitHub Copilot GPT-3.5 model - Standard capability", - "GitHub Copilot o1 model - Advanced reasoning model", - "GitHub Copilot o1-mini model - Compact reasoning model", - "GitHub Copilot o1-preview model - Preview of o1 capabilities" - ], - "default": "auto", - "markdownDescription": "Select the VS Code Language Model to use when `aiServiceProvider` is set to `vscode-lm`. Requires GitHub Copilot subscription. \n\nAvailable models may vary based on your Copilot subscription tier:\n\n- **auto**: Automatically selects the best available model\n- **copilot-gpt-4o**: Latest GPT-4o model (recommended for most users)\n- **copilot-gpt-4**: GPT-4 model with high reasoning capabilities\n- **copilot-gpt-4-turbo**: Fast GPT-4 Turbo model\n- **copilot-gpt-3.5-turbo**: Fast and efficient GPT-3.5 Turbo\n- **copilot-gpt-3.5**: Standard GPT-3.5 model\n- **copilot-o1**: Advanced o1 reasoning model (may require higher tier)\n- **copilot-o1-mini**: Compact o1-mini model\n- **copilot-o1-preview**: Preview access to o1 capabilities" - }, - "diffy-explain-ai.proxyUrl": { - "type": "string", - "default": "", - "description": "Provide a user-defined URL for the OpenAI API proxy server. 
Please ensure the server's availability is confirmed. The default value is: https://api.openai.com/v1." - }, - "diffy-explain-ai.openAiKey": { - "type": "string", - "markdownDescription": "Enter your api key from openai.com [Go to API Key Page](https://beta.openai.com/account/api-keys)" - }, - "diffy-explain-ai.model": { - "type": "string", - "default": "gpt-4-turbo", - "markdownDescription": "OpenAI models to use for your prompts. [Documentation](https://beta.openai.com/docs/models/models). \n\n**If you face 400 Bad Request please make sure you are using the right model for your integration method.**" - }, - "diffy-explain-ai.geminiApiKey": { - "type": "string", - "markdownDescription": "Enter your API key from Google AI Studio [Go to API Key Page](https://aistudio.google.com/app/apikey)" - }, - "diffy-explain-ai.geminiModel": { - "type": "string", - "default": "gemini-2.0-flash-exp", - "markdownDescription": "Google Gemini models to use for your prompts. [Documentation](https://ai.google.dev/gemini-api/docs/models/gemini). \n\nPopular models:\n- **gemini-2.0-flash-exp**: Latest experimental model (fast and capable)\n- **gemini-1.5-pro**: High-capability model for complex tasks\n- **gemini-1.5-flash**: Fast and efficient model" - }, - "diffy-explain-ai.temperature": { - "type": "number", - "default": 0.2, - "description": "Temperature is a parameter used in OpenAI's language models to control the level of randomness and creativity in generated text, with higher values making the output more random and lower values making it more deterministic." - }, - "diffy-explain-ai.maxTokens": { - "type": "number", - "default": 196, - "description": "Parameter in OpenAI's language models that limits the maximum length of the generated text output to a specified number of tokens, helping control the length of the response." 
- }, - "diffy-explain-ai.aiInstructions": { - "type": "string", - "default": "Analyze the provided git diff --staged output, categorize the changes into a conventional commit type (e.g., feat, fix, docs, chore,style), determine if a scope is applicable, and then synthesize a concise commit message that follows the format <type>[optional scope]: <description> [optional body] [optional footer(s)]" - }, - "diffy-explain-ai.commitMessageType": { - "type": "string", - "enum": [ - "conventional", - "gitmoji", - "custom" - ], - "enumDescriptions": [ - "Conventional Commits format: <type>[optional scope]: <subject>", - "Gitmoji format with emoji prefixes: 🎨 <emoji> <type>: <subject>", - "Custom user-defined format using a template with placeholders" - ], - "default": "conventional", - "markdownDescription": "Choose the commit message format style:\n\n- **conventional**: Standard Conventional Commits format (e.g., `feat: add new feature`)\n- **gitmoji**: Commits with emoji prefixes (e.g., `✨ feat: add new feature`)\n- **custom**: Fully customizable template with placeholders (configure via Custom Commit Prompt setting)" - }, - "diffy-explain-ai.customCommitPrompt": { - "type": "string", - "default": "Generate a commit message for the following git diff.\n\nRequirements:\n- Maximum subject length: {maxLength} characters\n- Use imperative mood\n- Be concise and clear{bodyInstructions}\n\nReturn ONLY the commit message, no explanations.", - "markdownDescription": "Custom commit message generation prompt when **Commit Message Type** is set to `custom`. 
\n\nSupported placeholders:\n- `{maxLength}` - Maximum subject line length\n- `{bodyInstructions}` - Auto-filled based on Include Commit Body setting\n- `{locale}` - Language/locale for the message\n- `{diff}` - The actual git diff (automatically appended)\n\nExample template:\n```\nGenerate a {locale} commit message following our team convention:\n- Start with JIRA ticket: [PROJ-XXX]\n- Subject max {maxLength} chars\n- Include impact analysis{bodyInstructions}\n```" - }, - "diffy-explain-ai.includeCommitBody": { - "type": "boolean", - "default": false, - "markdownDescription": "Include a detailed body section in commit messages with bullet points explaining the changes. When enabled, commit messages will have:\n\n```\nfeat: add user authentication\n\n- Implement JWT token generation\n- Add login and registration endpoints\n- Create user model and database schema\n```" - }, - "diffy-explain-ai.excludeFilesFromDiff": { - "type": "array", - "items": { - "type": "string" - }, - "default": [ - "package-lock.json", - "yarn.lock", - "pnpm-lock.yaml", - "*.jpg", - "*.png", - "*.gif", - "*.svg", - "*.ico", - "*.woff", - "*.woff2", - "*.ttf", - "*.eot" - ], - "markdownDescription": "File patterns to exclude from AI analysis when generating commit messages. This helps reduce token usage and improve quality by filtering out:\n\n- Lock files (package-lock.json, yarn.lock)\n- Large binary assets (images, fonts)\n- Generated files\n\nSupports glob patterns like `*.log`, `dist/**`, `**/*.min.js`" - }, - "diffy-explain-ai.maxCommitMessageLength": { - "type": "number", - "default": 72, - "minimum": 50, - "maximum": 200, - "markdownDescription": "Maximum character length for commit message subject line. 
Following best practices:\n\n- **50**: Strict GitHub standard\n- **72**: Common standard (default)\n- **100**: Relaxed limit" - } - } - } - }, - "scripts": { - "vscode:prepublish": "npm run package", - "compile": "webpack", - "watch": "webpack --watch", - "package": "webpack --mode production --devtool hidden-source-map", - "compile-tests": "tsc -p . --outDir out", - "watch-tests": "tsc -p . -w --outDir out", - "pretest": "pnpm run compile-tests && pnpm run compile && pnpm run lint", - "type-check": "tsc --noEmit", - "lint": "biome check --write .", - "lint:check": "biome check .", - "format": "biome format --write .", - "format:check": "biome format .", - "test": "node ./out/test/runTest.js" - }, - "devDependencies": { - "@biomejs/biome": "2.3.2", - "@types/glob": "^8.0.0", - "@types/mocha": "^10.0.1", - "@types/node": "^22.19.0", - "@types/vscode": "^1.105.0", - "@vscode/test-electron": "^2.2.0", - "glob": "^8.0.3", - "mocha": "^10.1.0", - "ts-loader": "^9.4.1", - "typescript": "^4.9.3", - "undici-types": "^7.16.0", - "webpack": "^5.76.0", - "webpack-cli": "^5.0.0" - }, - "dependencies": { - "@google/genai": "^1.28.0", - "axios": "^1.12.0", - "axios-retry": "^3.5.0", - "openai": "^4.7.0", - "simple-git": "^3.16.0" - } -} +{ + "name": "diffy-explain-ai", + "displayName": "Diffy Commit AI - Generate Your Commit Message", + "description": "Generate Commit Message for You or Explains The Changed Code Using Git Diff And OpenAi In Natural Language", + "version": "1.1.0", + "publisher": "hitclaw", + "engines": { + "vscode": "^1.105.0" + }, + "categories": [ + "SCM Providers" + ], + "repository": { + "url": "https://github.com/Hi7cl4w/diffy-explain-ai.git" + }, + "bugs": { + "url": "hhttps://github.com/Hi7cl4w/diffy-explain-ai/issues" + }, + "keywords": [ + "git", + "generate", + "message", + "commit", + "openai", + "vs-code", + "vscode", + "productivity" + ], + "icon": "icons/icon.png", + "main": "./dist/extension.js", + "contributes": { + "menus": { + "scm/title": [ + 
{ + "command": "diffy-explain-ai.generateCommitMessage", + "when": "scmProvider == git", + "group": "navigation", + "title": "Generate Commit Message" + }, + { + "command": "diffy-explain-ai.generateCommitMessage", + "when": "scmProvider == git", + "group": "inline", + "title": "Generate Commit Message" + }, + { + "command": "diffy-explain-ai.generateCommitMessage", + "when": "scmProvider == git", + "group": "inline", + "title": "Generate Commit Message" + } + ] + }, + "commands": [ + { + "command": "diffy-explain-ai.explainDiffClipboard", + "title": "DIFFY: Explain Changes and Copy to Clipboard", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + }, + { + "command": "diffy-explain-ai.generateCommitMessage", + "title": "DIFFY: Generate Commit Message", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + }, + { + "command": "diffy-explain-ai.generateCommitMessageClipboard", + "title": "DIFFY: Generate Commit Message and Copy to Clipboard", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + }, + { + "command": "diffy-explain-ai.explainAndPreview", + "title": "DIFFY: Explain and Preview", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + } + ], + "configuration": { + "type": "object", + "properties": { + "diffy-explain-ai.aiServiceProvider": { + "type": "string", + "enum": [ + "openai", + "vscode-lm", + "gemini" + ], + "default": "vscode-lm", + "markdownDescription": "Select the AI service provider to use for generating commit messages. 
\n\n- **vscode-lm**: Use VS Code's built-in Language Model API (GitHub Copilot) - Default\n- **openai**: Use OpenAI API directly (requires API key)\n- **gemini**: Use Google Gemini API (requires API key)" + }, + "diffy-explain-ai.vscodeLmModel": { + "type": "string", + "enum": [ + "auto", + "copilot-gpt-4o", + "copilot-gpt-4", + "copilot-gpt-4-turbo", + "copilot-gpt-3.5-turbo", + "copilot-gpt-3.5", + "copilot-o1", + "copilot-o1-mini", + "copilot-o1-preview" + ], + "enumDescriptions": [ + "Automatically selects the best available model (recommended)", + "GitHub Copilot GPT-4o model - Latest and most capable", + "GitHub Copilot GPT-4 model - High capability model", + "GitHub Copilot GPT-4 Turbo model - Fast and capable", + "GitHub Copilot GPT-3.5 Turbo model - Fast and efficient", + "GitHub Copilot GPT-3.5 model - Standard capability", + "GitHub Copilot o1 model - Advanced reasoning model", + "GitHub Copilot o1-mini model - Compact reasoning model", + "GitHub Copilot o1-preview model - Preview of o1 capabilities" + ], + "default": "auto", + "markdownDescription": "Select the VS Code Language Model to use when `aiServiceProvider` is set to `vscode-lm`. Requires GitHub Copilot subscription. \n\nAvailable models may vary based on your Copilot subscription tier:\n\n- **auto**: Automatically selects the best available model\n- **copilot-gpt-4o**: Latest GPT-4o model (recommended for most users)\n- **copilot-gpt-4**: GPT-4 model with high reasoning capabilities\n- **copilot-gpt-4-turbo**: Fast GPT-4 Turbo model\n- **copilot-gpt-3.5-turbo**: Fast and efficient GPT-3.5 Turbo\n- **copilot-gpt-3.5**: Standard GPT-3.5 model\n- **copilot-o1**: Advanced o1 reasoning model (may require higher tier)\n- **copilot-o1-mini**: Compact o1-mini model\n- **copilot-o1-preview**: Preview access to o1 capabilities" + }, + "diffy-explain-ai.proxyUrl": { + "type": "string", + "default": "", + "description": "Provide a user-defined URL for the OpenAI API proxy server. 
Please ensure the server's availability is confirmed. The default value is: https://api.openai.com/v1." + }, + "diffy-explain-ai.openAiKey": { + "type": "string", + "markdownDescription": "Enter your api key from openai.com [Go to API Key Page](https://beta.openai.com/account/api-keys)" + }, + "diffy-explain-ai.model": { + "type": "string", + "default": "gpt-4-turbo", + "markdownDescription": "OpenAI models to use for your prompts. [Documentation](https://beta.openai.com/docs/models/models). \n\n**If you face 400 Bad Request please make sure you are using the right model for your integration method.**" + }, + "diffy-explain-ai.geminiApiKey": { + "type": "string", + "markdownDescription": "Enter your API key from Google AI Studio [Go to API Key Page](https://aistudio.google.com/app/apikey)" + }, + "diffy-explain-ai.geminiModel": { + "type": "string", + "default": "gemini-2.0-flash-exp", + "markdownDescription": "Google Gemini models to use for your prompts. [Documentation](https://ai.google.dev/gemini-api/docs/models/gemini). \n\nPopular models:\n- **gemini-2.0-flash-exp**: Latest experimental model (fast and capable)\n- **gemini-1.5-pro**: High-capability model for complex tasks\n- **gemini-1.5-flash**: Fast and efficient model" + }, + "diffy-explain-ai.temperature": { + "type": "number", + "default": 0.2, + "description": "Temperature is a parameter used in OpenAI's language models to control the level of randomness and creativity in generated text, with higher values making the output more random and lower values making it more deterministic." + }, + "diffy-explain-ai.maxTokens": { + "type": "number", + "default": 196, + "description": "Parameter in OpenAI's language models that limits the maximum length of the generated text output to a specified number of tokens, helping control the length of the response." 
+ }, + "diffy-explain-ai.aiInstructions": { + "type": "string", + "default": "Analyze the provided git diff --staged output, categorize the changes into a conventional commit type (e.g., feat, fix, docs, chore,style), determine if a scope is applicable, and then synthesize a concise commit message that follows the format <type>[optional scope]: <description> [optional body] [optional footer(s)]" + }, + "diffy-explain-ai.commitMessageType": { + "type": "string", + "enum": [ + "conventional", + "gitmoji", + "custom" + ], + "enumDescriptions": [ + "Conventional Commits format: <type>[optional scope]: <subject>", + "Gitmoji format with emoji prefixes: 🎨 <emoji> <type>: <subject>", + "Custom user-defined format using a template with placeholders" + ], + "default": "conventional", + "markdownDescription": "Choose the commit message format style:\n\n- **conventional**: Standard Conventional Commits format (e.g., `feat: add new feature`)\n- **gitmoji**: Commits with emoji prefixes (e.g., `✨ feat: add new feature`)\n- **custom**: Fully customizable template with placeholders (configure via Custom Commit Prompt setting)" + }, + "diffy-explain-ai.customCommitPrompt": { + "type": "string", + "default": "Generate a commit message for the following git diff.\n\nRequirements:\n- Maximum subject length: {maxLength} characters\n- Use imperative mood\n- Be concise and clear{bodyInstructions}\n\nReturn ONLY the commit message, no explanations.", + "markdownDescription": "Custom commit message generation prompt when **Commit Message Type** is set to `custom`. 
\n\nSupported placeholders:\n- `{maxLength}` - Maximum subject line length\n- `{bodyInstructions}` - Auto-filled based on Include Commit Body setting\n- `{locale}` - Language/locale for the message\n- `{diff}` - The actual git diff (automatically appended)\n\nExample template:\n```\nGenerate a {locale} commit message following our team convention:\n- Start with JIRA ticket: [PROJ-XXX]\n- Subject max {maxLength} chars\n- Include impact analysis{bodyInstructions}\n```" + }, + "diffy-explain-ai.includeCommitBody": { + "type": "boolean", + "default": false, + "markdownDescription": "Include a detailed body section in commit messages with bullet points explaining the changes. When enabled, commit messages will have:\n\n```\nfeat: add user authentication\n\n- Implement JWT token generation\n- Add login and registration endpoints\n- Create user model and database schema\n```" + }, + "diffy-explain-ai.excludeFilesFromDiff": { + "type": "array", + "items": { + "type": "string" + }, + "default": [ + "package-lock.json", + "yarn.lock", + "pnpm-lock.yaml", + "*.jpg", + "*.png", + "*.gif", + "*.svg", + "*.ico", + "*.woff", + "*.woff2", + "*.ttf", + "*.eot" + ], + "markdownDescription": "File patterns to exclude from AI analysis when generating commit messages. This helps reduce token usage and improve quality by filtering out:\n\n- Lock files (package-lock.json, yarn.lock)\n- Large binary assets (images, fonts)\n- Generated files\n\nSupports glob patterns like `*.log`, `dist/**`, `**/*.min.js`" + }, + "diffy-explain-ai.maxCommitMessageLength": { + "type": "number", + "default": 72, + "minimum": 50, + "maximum": 200, + "markdownDescription": "Maximum character length for commit message subject line. 
Following best practices:\n\n- **50**: Strict GitHub standard\n- **72**: Common standard (default)\n- **100**: Relaxed limit" + } + } + } + }, + "scripts": { + "vscode:prepublish": "npm run package", + "compile": "webpack", + "watch": "webpack --watch", + "package": "webpack --mode production --devtool hidden-source-map", + "compile-tests": "tsc -p . --outDir out", + "watch-tests": "tsc -p . -w --outDir out", + "pretest": "pnpm run compile-tests && pnpm run compile && pnpm run lint", + "type-check": "tsc --noEmit", + "lint": "biome check --write .", + "lint:check": "biome check .", + "format": "biome format --write .", + "format:check": "biome format .", + "test": "node ./out/test/runTest.js" + }, + "devDependencies": { + "@biomejs/biome": "2.3.2", + "@types/glob": "^8.0.0", + "@types/mocha": "^10.0.1", + "@types/node": "^22.19.0", + "@types/vscode": "^1.105.0", + "@vscode/test-electron": "^2.2.0", + "glob": "^8.0.3", + "mocha": "^10.1.0", + "ts-loader": "^9.4.1", + "typescript": "^4.9.3", + "undici-types": "^7.16.0", + "webpack": "^5.76.0", + "webpack-cli": "^5.0.0" + }, + "dependencies": { + "@google/genai": "^1.28.0", + "axios": "^1.12.0", + "axios-retry": "^3.5.0", + "openai": "^4.7.0", + "simple-git": "^3.16.0" + } +} From c7d74aae457ed8039b81477c22b8159ef90abf6a Mon Sep 17 00:00:00 2001 From: Manu K Date: Mon, 3 Nov 2025 23:25:56 +0530 Subject: [PATCH 7/7] style: reformat package.json with proper indentation --- package.json | 516 +++++++++++++++++++++++++-------------------------- 1 file changed, 258 insertions(+), 258 deletions(-) diff --git a/package.json b/package.json index 5420f2c..1c7f67a 100644 --- a/package.json +++ b/package.json @@ -1,258 +1,258 @@ -{ - "name": "diffy-explain-ai", - "displayName": "Diffy Commit AI - Generate Your Commit Message", - "description": "Generate Commit Message for You or Explains The Changed Code Using Git Diff And OpenAi In Natural Language", - "version": "1.1.0", - "publisher": "hitclaw", - "engines": { - "vscode": 
"^1.105.0" - }, - "categories": [ - "SCM Providers" - ], - "repository": { - "url": "https://github.com/Hi7cl4w/diffy-explain-ai.git" - }, - "bugs": { - "url": "hhttps://github.com/Hi7cl4w/diffy-explain-ai/issues" - }, - "keywords": [ - "git", - "generate", - "message", - "commit", - "openai", - "vs-code", - "vscode", - "productivity" - ], - "icon": "icons/icon.png", - "main": "./dist/extension.js", - "contributes": { - "menus": { - "scm/title": [ - { - "command": "diffy-explain-ai.generateCommitMessage", - "when": "scmProvider == git", - "group": "navigation", - "title": "Generate Commit Message" - }, - { - "command": "diffy-explain-ai.generateCommitMessage", - "when": "scmProvider == git", - "group": "inline", - "title": "Generate Commit Message" - }, - { - "command": "diffy-explain-ai.generateCommitMessage", - "when": "scmProvider == git", - "group": "inline", - "title": "Generate Commit Message" - } - ] - }, - "commands": [ - { - "command": "diffy-explain-ai.explainDiffClipboard", - "title": "DIFFY: Explain Changes and Copy to Clipboard", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - }, - { - "command": "diffy-explain-ai.generateCommitMessage", - "title": "DIFFY: Generate Commit Message", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - }, - { - "command": "diffy-explain-ai.generateCommitMessageClipboard", - "title": "DIFFY: Generate Commit Message and Copy to Clipboard", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - }, - { - "command": "diffy-explain-ai.explainAndPreview", - "title": "DIFFY: Explain and Preview", - "icon": { - "dark": "icons/magic.svg", - "light": "icons/magic-light.svg" - } - } - ], - "configuration": { - "type": "object", - "properties": { - "diffy-explain-ai.aiServiceProvider": { - "type": "string", - "enum": [ - "openai", - "vscode-lm", - "gemini" - ], - "default": "vscode-lm", - "markdownDescription": "Select the AI service provider to 
use for generating commit messages. \n\n- **vscode-lm**: Use VS Code's built-in Language Model API (GitHub Copilot) - Default\n- **openai**: Use OpenAI API directly (requires API key)\n- **gemini**: Use Google Gemini API (requires API key)" - }, - "diffy-explain-ai.vscodeLmModel": { - "type": "string", - "enum": [ - "auto", - "copilot-gpt-4o", - "copilot-gpt-4", - "copilot-gpt-4-turbo", - "copilot-gpt-3.5-turbo", - "copilot-gpt-3.5", - "copilot-o1", - "copilot-o1-mini", - "copilot-o1-preview" - ], - "enumDescriptions": [ - "Automatically selects the best available model (recommended)", - "GitHub Copilot GPT-4o model - Latest and most capable", - "GitHub Copilot GPT-4 model - High capability model", - "GitHub Copilot GPT-4 Turbo model - Fast and capable", - "GitHub Copilot GPT-3.5 Turbo model - Fast and efficient", - "GitHub Copilot GPT-3.5 model - Standard capability", - "GitHub Copilot o1 model - Advanced reasoning model", - "GitHub Copilot o1-mini model - Compact reasoning model", - "GitHub Copilot o1-preview model - Preview of o1 capabilities" - ], - "default": "auto", - "markdownDescription": "Select the VS Code Language Model to use when `aiServiceProvider` is set to `vscode-lm`. Requires GitHub Copilot subscription. 
\n\nAvailable models may vary based on your Copilot subscription tier:\n\n- **auto**: Automatically selects the best available model\n- **copilot-gpt-4o**: Latest GPT-4o model (recommended for most users)\n- **copilot-gpt-4**: GPT-4 model with high reasoning capabilities\n- **copilot-gpt-4-turbo**: Fast GPT-4 Turbo model\n- **copilot-gpt-3.5-turbo**: Fast and efficient GPT-3.5 Turbo\n- **copilot-gpt-3.5**: Standard GPT-3.5 model\n- **copilot-o1**: Advanced o1 reasoning model (may require higher tier)\n- **copilot-o1-mini**: Compact o1-mini model\n- **copilot-o1-preview**: Preview access to o1 capabilities" - }, - "diffy-explain-ai.proxyUrl": { - "type": "string", - "default": "", - "description": "Provide a user-defined URL for the OpenAI API proxy server. Please ensure the server's availability is confirmed. The default value is: https://api.openai.com/v1." - }, - "diffy-explain-ai.openAiKey": { - "type": "string", - "markdownDescription": "Enter your api key from openai.com [Go to API Key Page](https://beta.openai.com/account/api-keys)" - }, - "diffy-explain-ai.model": { - "type": "string", - "default": "gpt-4-turbo", - "markdownDescription": "OpenAI models to use for your prompts. [Documentation](https://beta.openai.com/docs/models/models). \n\n**If you face 400 Bad Request please make sure you are using the right model for your integration method.**" - }, - "diffy-explain-ai.geminiApiKey": { - "type": "string", - "markdownDescription": "Enter your API key from Google AI Studio [Go to API Key Page](https://aistudio.google.com/app/apikey)" - }, - "diffy-explain-ai.geminiModel": { - "type": "string", - "default": "gemini-2.0-flash-exp", - "markdownDescription": "Google Gemini models to use for your prompts. [Documentation](https://ai.google.dev/gemini-api/docs/models/gemini). 
\n\nPopular models:\n- **gemini-2.0-flash-exp**: Latest experimental model (fast and capable)\n- **gemini-1.5-pro**: High-capability model for complex tasks\n- **gemini-1.5-flash**: Fast and efficient model" - }, - "diffy-explain-ai.temperature": { - "type": "number", - "default": 0.2, - "description": "Temperature is a parameter used in OpenAI's language models to control the level of randomness and creativity in generated text, with higher values making the output more random and lower values making it more deterministic." - }, - "diffy-explain-ai.maxTokens": { - "type": "number", - "default": 196, - "description": "Parameter in OpenAI's language models that limits the maximum length of the generated text output to a specified number of tokens, helping control the length of the response." - }, - "diffy-explain-ai.aiInstructions": { - "type": "string", - "default": "Analyze the provided git diff --staged output, categorize the changes into a conventional commit type (e.g., feat, fix, docs, chore,style), determine if a scope is applicable, and then synthesize a concise commit message that follows the format <type>[optional scope]: <description> [optional body] [optional footer(s)]" - }, - "diffy-explain-ai.commitMessageType": { - "type": "string", - "enum": [ - "conventional", - "gitmoji", - "custom" - ], - "enumDescriptions": [ - "Conventional Commits format: <type>[optional scope]: <subject>", - "Gitmoji format with emoji prefixes: 🎨 <emoji> <type>: <subject>", - "Custom user-defined format using a template with placeholders" - ], - "default": "conventional", - "markdownDescription": "Choose the commit message format style:\n\n- **conventional**: Standard Conventional Commits format (e.g., `feat: add new feature`)\n- **gitmoji**: Commits with emoji prefixes (e.g., `✨ feat: add new feature`)\n- **custom**: Fully customizable template with placeholders (configure via Custom Commit Prompt setting)" - }, - "diffy-explain-ai.customCommitPrompt": { - "type": "string", - "default": "Generate a commit message for the following 
git diff.\n\nRequirements:\n- Maximum subject length: {maxLength} characters\n- Use imperative mood\n- Be concise and clear{bodyInstructions}\n\nReturn ONLY the commit message, no explanations.", - "markdownDescription": "Custom commit message generation prompt when **Commit Message Type** is set to `custom`. \n\nSupported placeholders:\n- `{maxLength}` - Maximum subject line length\n- `{bodyInstructions}` - Auto-filled based on Include Commit Body setting\n- `{locale}` - Language/locale for the message\n- `{diff}` - The actual git diff (automatically appended)\n\nExample template:\n```\nGenerate a {locale} commit message following our team convention:\n- Start with JIRA ticket: [PROJ-XXX]\n- Subject max {maxLength} chars\n- Include impact analysis{bodyInstructions}\n```" - }, - "diffy-explain-ai.includeCommitBody": { - "type": "boolean", - "default": false, - "markdownDescription": "Include a detailed body section in commit messages with bullet points explaining the changes. When enabled, commit messages will have:\n\n```\nfeat: add user authentication\n\n- Implement JWT token generation\n- Add login and registration endpoints\n- Create user model and database schema\n```" - }, - "diffy-explain-ai.excludeFilesFromDiff": { - "type": "array", - "items": { - "type": "string" - }, - "default": [ - "package-lock.json", - "yarn.lock", - "pnpm-lock.yaml", - "*.jpg", - "*.png", - "*.gif", - "*.svg", - "*.ico", - "*.woff", - "*.woff2", - "*.ttf", - "*.eot" - ], - "markdownDescription": "File patterns to exclude from AI analysis when generating commit messages. 
This helps reduce token usage and improve quality by filtering out:\n\n- Lock files (package-lock.json, yarn.lock)\n- Large binary assets (images, fonts)\n- Generated files\n\nSupports glob patterns like `*.log`, `dist/**`, `**/*.min.js`" - }, - "diffy-explain-ai.maxCommitMessageLength": { - "type": "number", - "default": 72, - "minimum": 50, - "maximum": 200, - "markdownDescription": "Maximum character length for commit message subject line. Following best practices:\n\n- **50**: Strict GitHub standard\n- **72**: Common standard (default)\n- **100**: Relaxed limit" - } - } - } - }, - "scripts": { - "vscode:prepublish": "npm run package", - "compile": "webpack", - "watch": "webpack --watch", - "package": "webpack --mode production --devtool hidden-source-map", - "compile-tests": "tsc -p . --outDir out", - "watch-tests": "tsc -p . -w --outDir out", - "pretest": "pnpm run compile-tests && pnpm run compile && pnpm run lint", - "type-check": "tsc --noEmit", - "lint": "biome check --write .", - "lint:check": "biome check .", - "format": "biome format --write .", - "format:check": "biome format .", - "test": "node ./out/test/runTest.js" - }, - "devDependencies": { - "@biomejs/biome": "2.3.2", - "@types/glob": "^8.0.0", - "@types/mocha": "^10.0.1", - "@types/node": "^22.19.0", - "@types/vscode": "^1.105.0", - "@vscode/test-electron": "^2.2.0", - "glob": "^8.0.3", - "mocha": "^10.1.0", - "ts-loader": "^9.4.1", - "typescript": "^4.9.3", - "undici-types": "^7.16.0", - "webpack": "^5.76.0", - "webpack-cli": "^5.0.0" - }, - "dependencies": { - "@google/genai": "^1.28.0", - "axios": "^1.12.0", - "axios-retry": "^3.5.0", - "openai": "^4.7.0", - "simple-git": "^3.16.0" - } -} +{ + "name": "diffy-explain-ai", + "displayName": "Diffy Commit AI - Generate Your Commit Message", + "description": "Generate Commit Message for You or Explains The Changed Code Using Git Diff And OpenAi In Natural Language", + "version": "1.1.0", + "publisher": "hitclaw", + "engines": { + "vscode": 
"^1.105.0" + }, + "categories": [ + "SCM Providers" + ], + "repository": { + "url": "https://github.com/Hi7cl4w/diffy-explain-ai.git" + }, + "bugs": { + "url": "https://github.com/Hi7cl4w/diffy-explain-ai/issues" + }, + "keywords": [ + "git", + "generate", + "message", + "commit", + "openai", + "vs-code", + "vscode", + "productivity" + ], + "icon": "icons/icon.png", + "main": "./dist/extension.js", + "contributes": { + "menus": { + "scm/title": [ + { + "command": "diffy-explain-ai.generateCommitMessage", + "when": "scmProvider == git", + "group": "navigation", + "title": "Generate Commit Message" + }, + { + "command": "diffy-explain-ai.generateCommitMessage", + "when": "scmProvider == git", + "group": "inline", + "title": "Generate Commit Message" + }, + { + "command": "diffy-explain-ai.generateCommitMessage", + "when": "scmProvider == git", + "group": "inline", + "title": "Generate Commit Message" + } + ] + }, + "commands": [ + { + "command": "diffy-explain-ai.explainDiffClipboard", + "title": "DIFFY: Explain Changes and Copy to Clipboard", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + }, + { + "command": "diffy-explain-ai.generateCommitMessage", + "title": "DIFFY: Generate Commit Message", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + }, + { + "command": "diffy-explain-ai.generateCommitMessageClipboard", + "title": "DIFFY: Generate Commit Message and Copy to Clipboard", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + }, + { + "command": "diffy-explain-ai.explainAndPreview", + "title": "DIFFY: Explain and Preview", + "icon": { + "dark": "icons/magic.svg", + "light": "icons/magic-light.svg" + } + } + ], + "configuration": { + "type": "object", + "properties": { + "diffy-explain-ai.aiServiceProvider": { + "type": "string", + "enum": [ + "openai", + "vscode-lm", + "gemini" + ], + "default": "vscode-lm", + "markdownDescription": "Select the AI service provider to 
use for generating commit messages. \n\n- **vscode-lm**: Use VS Code's built-in Language Model API (GitHub Copilot) - Default\n- **openai**: Use OpenAI API directly (requires API key)\n- **gemini**: Use Google Gemini API (requires API key)" + }, + "diffy-explain-ai.vscodeLmModel": { + "type": "string", + "enum": [ + "auto", + "copilot-gpt-4o", + "copilot-gpt-4", + "copilot-gpt-4-turbo", + "copilot-gpt-3.5-turbo", + "copilot-gpt-3.5", + "copilot-o1", + "copilot-o1-mini", + "copilot-o1-preview" + ], + "enumDescriptions": [ + "Automatically selects the best available model (recommended)", + "GitHub Copilot GPT-4o model - Latest and most capable", + "GitHub Copilot GPT-4 model - High capability model", + "GitHub Copilot GPT-4 Turbo model - Fast and capable", + "GitHub Copilot GPT-3.5 Turbo model - Fast and efficient", + "GitHub Copilot GPT-3.5 model - Standard capability", + "GitHub Copilot o1 model - Advanced reasoning model", + "GitHub Copilot o1-mini model - Compact reasoning model", + "GitHub Copilot o1-preview model - Preview of o1 capabilities" + ], + "default": "auto", + "markdownDescription": "Select the VS Code Language Model to use when `aiServiceProvider` is set to `vscode-lm`. Requires GitHub Copilot subscription. 
\n\nAvailable models may vary based on your Copilot subscription tier:\n\n- **auto**: Automatically selects the best available model\n- **copilot-gpt-4o**: Latest GPT-4o model (recommended for most users)\n- **copilot-gpt-4**: GPT-4 model with high reasoning capabilities\n- **copilot-gpt-4-turbo**: Fast GPT-4 Turbo model\n- **copilot-gpt-3.5-turbo**: Fast and efficient GPT-3.5 Turbo\n- **copilot-gpt-3.5**: Standard GPT-3.5 model\n- **copilot-o1**: Advanced o1 reasoning model (may require higher tier)\n- **copilot-o1-mini**: Compact o1-mini model\n- **copilot-o1-preview**: Preview access to o1 capabilities" + }, + "diffy-explain-ai.proxyUrl": { + "type": "string", + "default": "", + "description": "Provide a user-defined base URL for the OpenAI API proxy server. Please ensure the server is reachable before use. The default value is: https://api.openai.com/v1." + }, + "diffy-explain-ai.openAiKey": { + "type": "string", + "markdownDescription": "Enter your API key from openai.com [Go to API Key Page](https://beta.openai.com/account/api-keys)" + }, + "diffy-explain-ai.model": { + "type": "string", + "default": "gpt-4-turbo", + "markdownDescription": "OpenAI models to use for your prompts. [Documentation](https://beta.openai.com/docs/models/models). \n\n**If you get a 400 Bad Request error, please make sure you are using the right model for your integration method.**" + }, + "diffy-explain-ai.geminiApiKey": { + "type": "string", + "markdownDescription": "Enter your API key from Google AI Studio [Go to API Key Page](https://aistudio.google.com/app/apikey)" + }, + "diffy-explain-ai.geminiModel": { + "type": "string", + "default": "gemini-2.0-flash-exp", + "markdownDescription": "Google Gemini models to use for your prompts. [Documentation](https://ai.google.dev/gemini-api/docs/models/gemini). 
\n\nPopular models:\n- **gemini-2.0-flash-exp**: Latest experimental model (fast and capable)\n- **gemini-1.5-pro**: High-capability model for complex tasks\n- **gemini-1.5-flash**: Fast and efficient model" + }, + "diffy-explain-ai.temperature": { + "type": "number", + "default": 0.2, + "description": "Temperature is a parameter used in OpenAI's language models to control the level of randomness and creativity in generated text, with higher values making the output more random and lower values making it more deterministic." + }, + "diffy-explain-ai.maxTokens": { + "type": "number", + "default": 196, + "description": "Parameter in OpenAI's language models that limits the maximum length of the generated text output to a specified number of tokens, helping control the length of the response." + }, + "diffy-explain-ai.aiInstructions": { + "type": "string", + "default": "Analyze the provided git diff --staged output, categorize the changes into a conventional commit type (e.g., feat, fix, docs, chore, style), determine if a scope is applicable, and then synthesize a concise commit message that follows the format <type>[optional scope]: <description> [optional body] [optional footer(s)]" + }, + "diffy-explain-ai.commitMessageType": { + "type": "string", + "enum": [ + "conventional", + "gitmoji", + "custom" + ], + "enumDescriptions": [ + "Conventional Commits format: <type>[optional scope]: <description>", + "Gitmoji format with emoji prefixes: 🎨 <type>: <description>", + "Custom user-defined format using a template with placeholders" + ], + "default": "conventional", + "markdownDescription": "Choose the commit message format style:\n\n- **conventional**: Standard Conventional Commits format (e.g., `feat: add new feature`)\n- **gitmoji**: Commits with emoji prefixes (e.g., `✨ feat: add new feature`)\n- **custom**: Fully customizable template with placeholders (configure via Custom Commit Prompt setting)" + }, + "diffy-explain-ai.customCommitPrompt": { + "type": "string", + "default": "Generate a commit message for the following 
git diff.\n\nRequirements:\n- Maximum subject length: {maxLength} characters\n- Use imperative mood\n- Be concise and clear{bodyInstructions}\n\nReturn ONLY the commit message, no explanations.", + "markdownDescription": "Custom commit message generation prompt when **Commit Message Type** is set to `custom`. \n\nSupported placeholders:\n- `{maxLength}` - Maximum subject line length\n- `{bodyInstructions}` - Auto-filled based on Include Commit Body setting\n- `{locale}` - Language/locale for the message\n- `{diff}` - The actual git diff (automatically appended)\n\nExample template:\n```\nGenerate a {locale} commit message following our team convention:\n- Start with JIRA ticket: [PROJ-XXX]\n- Subject max {maxLength} chars\n- Include impact analysis{bodyInstructions}\n```" + }, + "diffy-explain-ai.includeCommitBody": { + "type": "boolean", + "default": false, + "markdownDescription": "Include a detailed body section in commit messages with bullet points explaining the changes. When enabled, commit messages will have:\n\n```\nfeat: add user authentication\n\n- Implement JWT token generation\n- Add login and registration endpoints\n- Create user model and database schema\n```" + }, + "diffy-explain-ai.excludeFilesFromDiff": { + "type": "array", + "items": { + "type": "string" + }, + "default": [ + "package-lock.json", + "yarn.lock", + "pnpm-lock.yaml", + "*.jpg", + "*.png", + "*.gif", + "*.svg", + "*.ico", + "*.woff", + "*.woff2", + "*.ttf", + "*.eot" + ], + "markdownDescription": "File patterns to exclude from AI analysis when generating commit messages. 
This helps reduce token usage and improve quality by filtering out:\n\n- Lock files (package-lock.json, yarn.lock)\n- Large binary assets (images, fonts)\n- Generated files\n\nSupports glob patterns like `*.log`, `dist/**`, `**/*.min.js`" + }, + "diffy-explain-ai.maxCommitMessageLength": { + "type": "number", + "default": 72, + "minimum": 50, + "maximum": 200, + "markdownDescription": "Maximum character length for commit message subject line. Following best practices:\n\n- **50**: Strict GitHub standard\n- **72**: Common standard (default)\n- **100**: Relaxed limit" + } + } + } + }, + "scripts": { + "vscode:prepublish": "npm run package", + "compile": "webpack", + "watch": "webpack --watch", + "package": "webpack --mode production --devtool hidden-source-map", + "compile-tests": "tsc -p . --outDir out", + "watch-tests": "tsc -p . -w --outDir out", + "pretest": "pnpm run compile-tests && pnpm run compile && pnpm run lint", + "type-check": "tsc --noEmit", + "lint": "biome check --write .", + "lint:check": "biome check .", + "format": "biome format --write .", + "format:check": "biome format .", + "test": "node ./out/test/runTest.js" + }, + "devDependencies": { + "@biomejs/biome": "2.3.2", + "@types/glob": "^8.0.0", + "@types/mocha": "^10.0.1", + "@types/node": "^22.19.0", + "@types/vscode": "^1.105.0", + "@vscode/test-electron": "^2.2.0", + "glob": "^8.0.3", + "mocha": "^10.1.0", + "ts-loader": "^9.4.1", + "typescript": "^4.9.3", + "undici-types": "^7.16.0", + "webpack": "^5.76.0", + "webpack-cli": "^5.0.0" + }, + "dependencies": { + "@google/genai": "^1.28.0", + "axios": "^1.12.0", + "axios-retry": "^3.5.0", + "openai": "^4.7.0", + "simple-git": "^3.16.0" + } +}