diff --git a/nvim/.config/nvim/lazy-lock.json b/nvim/.config/nvim/lazy-lock.json
index 10f302f..cf64526 100644
--- a/nvim/.config/nvim/lazy-lock.json
+++ b/nvim/.config/nvim/lazy-lock.json
@@ -8,9 +8,9 @@
   "cmp-calc": { "branch": "main", "commit": "5947b412da67306c5b68698a02a846760059be2e" },
   "cmp-pandoc.nvim": { "branch": "main", "commit": "30faa4456a7643c4cb02d8fa18438fd484ed7602" },
   "cmp-spell": { "branch": "master", "commit": "694a4e50809d6d645c1ea29015dad0c293f019d6" },
-  "codecompanion-gitcommit.nvim": { "branch": "main", "commit": "0ea26d93321e259dbb3766bf7f845ff02284e220" },
-  "codecompanion-history.nvim": { "branch": "main", "commit": "5442513f1303884079c8f13cf8b75da44a3db679" },
-  "codecompanion.nvim": { "branch": "main", "commit": "7ae585e1c868edb523cbb15c49fd15bc3def1261" },
+  "codecompanion-gitcommit.nvim": { "branch": "main", "commit": "e237b9901d64074fa84f74c1b20892303e3e1830" },
+  "codecompanion-history.nvim": { "branch": "main", "commit": "b9f1afb77f1a8805e686f89ac38338a9ca588579" },
+  "codecompanion.nvim": { "branch": "main", "commit": "76f1c1aaedbb159256dbc64705cd34f447046d64" },
   "conform.nvim": { "branch": "master", "commit": "a6f5bdb78caa305496357d17e962bbc4c0b392e2" },
   "copilot-lualine": { "branch": "main", "commit": "6bc29ba1fcf8f0f9ba1f0eacec2f178d9be49333" },
   "copilot.lua": { "branch": "master", "commit": "c1bb86abbed1a52a11ab3944ef00c8410520543d" },
diff --git a/nvim/.config/nvim/lua/plugins/llm.lua b/nvim/.config/nvim/lua/plugins/llm.lua
index 6fd15da..4f41c36 100644
--- a/nvim/.config/nvim/lua/plugins/llm.lua
+++ b/nvim/.config/nvim/lua/plugins/llm.lua
@@ -58,43 +58,56 @@ return {
     opts = {
       strategies = {
         chat = { adapter = "groq" },
-        inline = { adapter = "groq" },
+        inline = { adapter = "copilot" },
+        cmd = { adapter = "groq" },
       },
       adapters = {
-        groq = function()
-          return require("codecompanion.adapters").extend("openai", {
-            env = {
-              api_key = "GROQ_API_KEY",
-            },
-            name = "Groq",
-            url = "https://api.groq.com/openai/v1/chat/completions",
-            schema = {
-              model = {
-                default = "llama-3.1-8b-instant",
-                choices = {
-                  -- production models
-                  "llama-3.3-70b-versatile",
-                  "llama-3.1-8b-instant",
-                  "moonshotai/kimi-k2-instruct",
-                  "meta-llama/llama-guard-4-12b",
-                  "openai/gpt-oss-120b",
-                  "openai/gpt-oss-20b",
-                  -- preview models
-                  "meta-llama/llama-4-maverick-17b-128e-instruct",
-                  "meta-llama/llama-4-scout-17b-16e-instruct",
-                  "deepseek-r1-distill-llama-70b",
-                  "qwen/qwen3-32b",
+        http = {
+          opts = {
+            show_defaults = true, -- TODO: set to false to show only the adapters configured here, but copilot (a default adapter) is then hidden as well
+          },
+          groq = function()
+            return require("codecompanion.adapters").extend("openai", {
+              env = {
+                api_key = "GROQ_API_KEY",
+              },
+              name = "Groq",
+              url = "https://api.groq.com/openai/v1/chat/completions",
+              schema = {
+                model = {
+                  default = "llama-3.1-8b-instant",
+                  choices = {
+                    -- production models
+                    "llama-3.3-70b-versatile",
+                    "llama-3.1-8b-instant",
+                    "moonshotai/kimi-k2-instruct",
+                    "meta-llama/llama-guard-4-12b",
+                    "openai/gpt-oss-120b",
+                    "openai/gpt-oss-20b",
+                    -- preview models
+                    "meta-llama/llama-4-maverick-17b-128e-instruct",
+                    "meta-llama/llama-4-scout-17b-16e-instruct",
+                    "deepseek-r1-distill-llama-70b",
+                    "qwen/qwen3-32b",
+                  },
                 },
               },
-            },
-            max_tokens = {
-              default = 4096,
-            },
-            temperature = {
-              default = 1,
-            },
-          })
-        end,
+              max_tokens = {
+                default = 4096,
+              },
+              temperature = {
+                default = 1,
+              },
+            })
+          end,
+          gemini = function()
+            return require("codecompanion.adapters").extend("gemini", {
+              env = {
+                api_key = "GEMINI_API_KEY",
+              },
+            })
+          end,
+        },
       },
       display = {
         action_palette = {
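
Note on the llm.lua change: moving the adapter definitions under `adapters.http` appears to track the newer codecompanion.nvim config layout, which splits adapters into HTTP and ACP groups; the lock-file bumps above pick up a version using that layout. Both custom adapters resolve their keys from the environment (`GROQ_API_KEY`, `GEMINI_API_KEY`), so Neovim must be started with those variables exported. Below is a minimal sketch of a startup check using only core Neovim APIs (vim.env, vim.notify); the check is an illustration, not part of codecompanion.nvim or this config:

-- Warn at startup when a required API key is missing.
-- The variable names mirror the env tables in llm.lua above.
for _, key in ipairs({ "GROQ_API_KEY", "GEMINI_API_KEY" }) do
  if not vim.env[key] or vim.env[key] == "" then
    vim.notify("codecompanion: " .. key .. " is unset; its adapter will fail", vim.log.levels.WARN)
  end
end

Exporting the keys in the shell profile (e.g. export GROQ_API_KEY=...) before launching Neovim avoids the warning.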