olimorris / codecompanion.nvim

✨ AI-powered coding, seamlessly in Neovim. Supports Anthropic, Copilot, Gemini, Ollama, OpenAI and xAI LLMs

[Bug]: Error Switching Adapters via `ga` Keymapping After Refactor Commit #299

Closed: bassamsdata closed this issue 1 month ago

bassamsdata commented 1 month ago

Your minimal.lua config


---@diagnostic disable: missing-fields

--NOTE: Set the config path to enable the Copilot adapter to work.
--It will search the following paths for the Copilot token:
--  - "$CODECOMPANION_TOKEN_PATH/github-copilot/hosts.json"
--  - "$CODECOMPANION_TOKEN_PATH/github-copilot/apps.json"
vim.env["CODECOMPANION_TOKEN_PATH"] = vim.fn.expand("~/.config")

vim.env.LAZY_STDPATH = ".repro"
load(vim.fn.system("curl -s https://raw.githubusercontent.com/folke/lazy.nvim/main/bootstrap.lua"))()

-- Your CodeCompanion setup
local plugins = {
  {
    "olimorris/codecompanion.nvim",
    dependencies = {
      { "nvim-treesitter/nvim-treesitter", build = ":TSUpdate" },
      { "nvim-lua/plenary.nvim" },
      { "hrsh7th/nvim-cmp" },
      { "stevearc/dressing.nvim", opts = {} },
    },
    opts = {
      --Refer to: https://github.com/olimorris/codecompanion.nvim/blob/main/lua/codecompanion/config.lua
      adapters = {
        groq = function()
          return require("codecompanion.adapters").extend("openai", {
            env = {
              api_key = "GROQ_API_KEY",
            },
            name = "Groq",
            url = "https://api.groq.com/openai/v1/chat/completions",
            schema = {
              model = {
                default = "llama-3.2-11b-text-preview",
                choices = {
                  "llama-3.2-90b-text-preview",
                  "llama-3.2-11b-text-preview",
                  "mixtral-8x7b-32768",
                  "llama-3.1-70b-versatile",
                },
              },
            },
            max_tokens = {
              default = 4096,
            },
            temperature = {
              default = 1,
            },
            handlers = {
              form_messages = function(self, messages)
                for _, msg in ipairs(messages) do
                  -- Remove 'id' and 'opts' properties from all messages
                  msg.id = nil
                  msg.opts = nil

                  -- Ensure 'name' is a string if present, otherwise remove it
                  if msg.name then
                    msg.name = tostring(msg.name)
                  else
                    msg.name = nil
                  end

                  -- Ensure only supported properties are present
                  local supported_props = { role = true, content = true, name = true }
                  for prop in pairs(msg) do
                    if not supported_props[prop] then
                      msg[prop] = nil
                    end
                  end
                end
                return { messages = messages }
              end,
            },
          })
        end,
      },
      strategies = {
        --NOTE: Change the adapter as required
        chat = { adapter = "openai" },
        inline = { adapter = "openai" },
      },
      opts = {
        log_level = "DEBUG",
      },
    },
  },
}

require("lazy.minit").repro({ spec = plugins })

-- Setup Tree-sitter
local ts_status, treesitter = pcall(require, "nvim-treesitter.configs")
if ts_status then
  treesitter.setup({
    ensure_installed = { "lua", "markdown", "markdown_inline", "yaml" },
    highlight = { enable = true },
  })
end

-- Setup completion
local cmp_status, cmp = pcall(require, "cmp")
if cmp_status then
  cmp.setup({
    mapping = cmp.mapping.preset.insert({
      ["<C-b>"] = cmp.mapping.scroll_docs(-4),
      ["<C-f>"] = cmp.mapping.scroll_docs(4),
      ["<C-Space>"] = cmp.mapping.complete(),
      ["<C-e>"] = cmp.mapping.abort(),
      -- Accept the currently selected item. Set `select` to `false` to only confirm explicitly selected items.
      ["<CR>"] = cmp.mapping.confirm({ select = true }),
    }),
  })
end

Error messages

Error executing vim.schedule lua callback: ...m/lazy/codecompanion.nvim/lua/codecompanion/adapters.lua:257: module 'codecompanion.adapters.groq' not found:
        no field package.preload['codecompanion.adapters.groq']
cache_loader: module codecompanion.adapters.groq not found
cache_loader_lib: module codecompanion.adapters.groq not found
        no file './codecompanion/adapters/groq.lua'
        no file '/Users/runner/work/neovim/neovim/.deps/usr/share/luajit-2.1/codecompanion/adapters/groq.lua'
        no file '/usr/local/share/lua/5.1/codecompanion/adapters/groq.lua'
        no file '/usr/local/share/lua/5.1/codecompanion/adapters/groq/init.lua'
        no file '/Users/runner/work/neovim/neovim/.deps/usr/share/lua/5.1/codecompanion/adapters/groq.lua'
        no file '/Users/runner/work/neovim/neovim/.deps/usr/share/lua/5.1/codecompanion/adapters/groq/init.lua'
        no file './codecompanion/adapters/groq.so'
        no file '/usr/local/lib/lua/5.1/codecompanion/adapters/groq.so'
        no file '/Users/runner/work/neovim/neovim/.deps/usr/lib/lua/5.1/codecompanion/adapters/groq.so'
        no file '/usr/local/lib/lua/5.1/loadall.so'
        no file './codecompanion.so'
        no file '/usr/local/lib/lua/5.1/codecompanion.so'
        no file '/Users/runner/work/neovim/neovim/.deps/usr/lib/lua/5.1/codecompanion.so'
        no file '/usr/local/lib/lua/5.1/loadall.so'
stack traceback:
        [C]: in function 'require'
        ...m/lazy/codecompanion.nvim/lua/codecompanion/adapters.lua:257: in function 'resolve'
        ...im/lazy/codecompanion.nvim/lua/codecompanion/keymaps.lua:314: in function 'cb'
        ...repro/data/nvim/lazy/dressing.nvim/lua/dressing/util.lua:206: in function <...repro/data/nvim/lazy/dressing.nvim/lua/dressing/util.lua:202>
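
The traceback points at the cause: after `ga` is pressed, `adapters.lua:257` calls `require("codecompanion.adapters.groq")`, treating the selected name as a built-in adapter module instead of first checking the `adapters` table from the user's configuration, where `groq` is defined as a factory function. Below is a minimal sketch of a lookup order that would avoid the error. It is illustrative only, not the plugin's actual implementation, and it assumes user-defined adapters are reachable through the config module referenced in minimal.lua above:

-- Illustrative sketch, not the plugin's real resolver. Assumes user-defined
-- adapters live under config.adapters, as in the minimal.lua above.
local function resolve_adapter(name)
  local config = require("codecompanion.config")
  local user_adapter = config.adapters and config.adapters[name]

  if type(user_adapter) == "function" then
    -- User adapters may be factory functions, like `groq` above
    return user_adapter()
  elseif user_adapter ~= nil then
    return user_adapter
  end

  -- Fall back to built-in modules only for names absent from the config
  return require("codecompanion.adapters." .. name)
end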

Log output

[DEBUG] 2024-10-06 23:34:46
Settings:
{
  env = {
    api_key = "GROQ_API_KEY"
  },
  features = {
    text = true,
    tokens = true,
    vision = true
  },
  handlers = {
    chat_output = <function 1>,
    form_messages = <function 2>,
    form_parameters = <function 3>,
    inline_output = <function 4>,
    on_exit = <function 5>,
    setup = <function 6>,
    tokens = <function 7>
  },
  headers = {
    Authorization = "Bearer ${api_key}",
    ["Content-Type"] = "application/json"
  },
  max_tokens = {
    default = 4096
  },
  name = "Groq",
  opts = {
    stream = true
  },
  parameters = {
    frequency_penalty = 0,
    model = "llama-3.2-11b-text-preview",
    presence_penalty = 0,
    temperature = 1,
    top_p = 1
  },
  raw = { "--no-buffer", "--silent" },
  roles = {
    llm = "assistant",
    user = "user"
  },
  schema = {
    frequency_penalty = {
      default = 0,
      desc = "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      mapping = "parameters",
      optional = true,
      order = 7,
      type = "number",
      validate = <function 8>
    },
    logit_bias = {
      desc = "Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID) to an associated bias value from -100 to 100. Use https://platform.openai.com/tokenizer to find token IDs.",
      mapping = "parameters",
      optional = true,
      order = 8,
      subtype = {
        type = "integer",
        validate = <function 9>
      },
      subtype_key = {
        type = "integer"
      },
      type = "map"
    },
    max_tokens = {
      desc = "The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.",
      mapping = "parameters",
      optional = true,
      order = 5,
      type = "integer",
      validate = <function 10>
    },
    model = {
      choices = { "llama-3.2-90b-text-preview", "llama-3.2-11b-text-preview", "mixtral-8x7b-32768", "llama-3.1-70b-versatile" },
      default = "llama-3.2-11b-text-preview",
      desc = "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.",
      mapping = "parameters",
      order = 1,
      type = "enum"
    },
    presence_penalty = {
      default = 0,
      desc = "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
      mapping = "parameters",
      optional = true,
      order = 6,
      type = "number",
      validate = <function 11>
    },
    stop = {
      desc = "Up to 4 sequences where the API will stop generating further tokens.",
      mapping = "parameters",
      optional = true,
      order = 4,
      subtype = {
        type = "string"
      },
      type = "list",
      validate = <function 12>
    },
    temperature = {
      default = 1,
      desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
      mapping = "parameters",
      optional = true,
      order = 2,
      type = "number",
      validate = <function 13>
    },
    top_p = {
      default = 1,
      desc = "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.",
      mapping = "parameters",
      optional = true,
      order = 3,
      type = "number",
      validate = <function 14>
    },
    user = {
      desc = "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.",
      mapping = "parameters",
      optional = true,
      order = 9,
      type = "string",
      validate = <function 15>
    }
  },
  temperature = {
    default = 1
  },
  url = "https://api.groq.com/openai/v1/chat/completions",
  <metatable> = {
    __index = {
      extend = <function 16>,
      get_default_settings = <function 17>,
      get_env_vars = <function 18>,
      map_roles = <function 19>,
      map_schema_to_params = <function 20>,
      new = <function 21>,
      resolve = <function 22>,
      set_env_vars = <function 23>
    }
  }
}
[DEBUG] 2024-10-06 23:34:46
Messages:
{ {
    content = "You are an AI programming assistant named \"CodeCompanion\".\nYou are currently plugged in to the Neovim text editor on a user's machine.\n\nYour core tasks include:\n- Answering general programming questions.\n- Explaining how the code in a Neovim buffer works.\n- Reviewing the selected code in a Neovim buffer.\n- Generating unit tests for the selected code.\n- Proposing fixes for problems in the selected code.\n- Scaffolding code for a new workspace.\n- Finding relevant code to the user's query.\n- Proposing fixes for test failures.\n- Answering questions about Neovim.\n- Running tools.\n\nYou must:\n- Follow the user's requirements carefully and to the letter.\n- Keep your answers short and impersonal, especially if the user responds with context outside of your tasks.\n- Minimize other prose.\n- Use Markdown formatting in your answers.\n- Include the programming language name at the start of the Markdown code blocks.\n- Avoid line numbers in code blocks.\n- Avoid wrapping the whole response in triple backticks.\n- Only return code that's relevant to the task at hand. You may not need to return all of the code that the user has shared.\n\nWhen given a task:\n1. Think step-by-step and describe your plan for what to build in pseudocode, written out in great detail, unless asked not to do so.\n2. Output the code in a single code block, being careful to only return relevant code.\n3. You should always generate short suggestions for the next user turns that are relevant to the conversation.\n4. You can only give one reply for each conversation turn.",
    id = 232375990,
    opts = {
      visible = false
    },
    role = "system"
  }, {
    content = "hello",
    id = -1568641415,
    opts = {
      visible = true
    },
    role = "user"
  } }
[INFO] 2024-10-06 23:34:46
Chat request started
[DEBUG] 2024-10-06 23:34:46
Request:
{ "-sSL", "-D", "/tmp/plenary_curl_9c1a45b6.headers", "--compressed", "-X", "POST", "-H", "Content-Type: application/json", "-H", "Authorization: Bearer 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", "--data-raw", "{\"stream_options\":{\"include_usage\":true},\"messages\":[{\"role\":\"system\",\"content\":\"You are an AI programming assistant named \\\"CodeCompanion\\\".\\nYou are currently plugged in to the Neovim text editor on a user's machine.\\n\\nYour core tasks include:\\n- Answering general programming questions.\\n- Explaining how the code in a Neovim buffer works.\\n- Reviewing the selected code in a Neovim buffer.\\n- Generating unit tests for the selected code.\\n- Proposing fixes for problems in the selected code.\\n- Scaffolding code for a new workspace.\\n- Finding relevant code to the user's query.\\n- Proposing fixes for test failures.\\n- Answering questions about Neovim.\\n- Running tools.\\n\\nYou must:\\n- Follow the user's requirements carefully and to the letter.\\n- Keep your answers short and impersonal, especially if the user responds with context outside of your tasks.\\n- Minimize other prose.\\n- Use Markdown formatting in your answers.\\n- Include the programming language name at the start of the Markdown code blocks.\\n- Avoid line numbers in code blocks.\\n- Avoid wrapping the whole response in triple backticks.\\n- Only return code that's relevant to the task at hand. You may not need to return all of the code that the user has shared.\\n\\nWhen given a task:\\n1. Think step-by-step and describe your plan for what to build in pseudocode, written out in great detail, unless asked not to do so.\\n2. Output the code in a single code block, being careful to only return relevant code.\\n3. You should always generate short suggestions for the next user turns that are relevant to the conversation.\\n4. You can only give one reply for each conversation turn.\"},{\"role\":\"user\",\"content\":\"hello\"}],\"top_p\":1,\"stream\":true,\"presence_penalty\":0,\"frequency_penalty\":0,\"temperature\":1,\"model\":\"llama-3.2-11b-text-preview\"}", "--no-buffer", "--silent", "https://api.groq.com/openai/v1/chat/completions" }
[INFO] 2024-10-06 23:34:46
Chat request completed

Health check output

codecompanion: require("codecompanion.health").check()

- Neovim version: 0.10.2
- Log file: /Users/bassam/repos/minimal/.repro/state/nvim/codecompanion.log

Plugins: ~
- OK plenary.nvim installed
- OK nvim-treesitter installed
- OK nvim-cmp installed
- WARNING telescope.nvim not found
- OK dressing.nvim installed

Tree-sitter parsers: ~
- OK markdown parser installed
- OK yaml parser installed

Libraries: ~
- OK curl installed

Describe the bug

Hello,

When I switch the adapter using the `ga` keymapping, I get an error with a customized adapter. This likely started after the recent refactor commit 38297a5: [refactor(keymaps): selecting adapters and models in chat buffer](https://github.com/olimorris/codecompanion.nvim/commit/25b828d22246a2d8a810c148f2ac0032f1a0e92e).

For example, if I open the chat with the OpenAI adapter as the default and then switch to groq, the error appears.

However, if I use the command `:CodeCompanionChat groq`, everything works fine.
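
The failing lookup can also be reproduced outside the chat UI. The snippet below is a hypothetical diagnostic; the traceback confirms that `codecompanion.adapters` has a `resolve` function, but the exact argument it accepts is an assumption on my part:

-- Hypothetical diagnostic: call resolve() directly with the adapter name.
-- That resolve() exists is shown by the traceback; its signature is assumed.
local ok, result = pcall(function()
  return require("codecompanion.adapters").resolve("groq")
end)
if not ok then
  print("resolve failed: " .. tostring(result))
end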

Thank you

[Screenshot attached: 2024-10-06 at 11:39:11 PM]

Reproduce the bug

Steps to reproduce:

  1. Open the chat with the OpenAI adapter as the default.
  2. Switch to groq using `ga`; the error occurs.

edit: corrected the title of the commit


bassamsdata commented 1 month ago

Additionally, the debug log above didn't capture the error itself; it only recorded my use of the `:CodeCompanionChat groq` command, which I ran to confirm that path works.

Thank you

olimorris commented 1 month ago

Great shout. I'll look into this.