root@debian:~/.config# ls
amass go io.datasette.llm notify nuclei solana subfinder uncover
(venv) root@debian:~/.config# cd io.datasette.llm
(venv) root@debian:~/.config/io.datasette.llm# ls
aliases.json keys.json logs.db mlc
(venv) root@debian:~/.config/io.datasette.llm# ls -lra
total 68
drwxr-xr-x 3 root root 4096 Oct 27 22:34 mlc
-rw-r--r-- 1 root root 45056 Oct 27 22:41 logs.db
-rw-r--r-- 1 root root 149 Oct 27 22:29 keys.json
-rw-r--r-- 1 root root 175 Oct 27 22:39 aliases.json
drwxr-xr-x 10 root root 4096 Oct 27 22:28 ..
drwxr-xr-x 3 root root 4096 Oct 27 22:41 .
(venv) root@debian:~/.config/io.datasette.llm# cd mlc
(venv) root@debian:~/.config/io.datasette.llm/mlc# ls
dist
(venv) root@debian:~/.config/io.datasette.llm/mlc# cd dist
(venv) root@debian:~/.config/io.datasette.llm/mlc/dist# ls
prebuilt
(venv) root@debian:~/.config/io.datasette.llm/mlc/dist# cd prebuilt
(venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# ls
lib mlc-chat-Llama-2-13b-chat-hf-q4f16_1 mlc-chat-Llama-2-7b-chat-hf-q4f16_1 mlc-chat-WizardLM-13B-V1.2-q4f16_1
(venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# make install
make: No rule to make target 'install'. Stop.
(venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# make
make: No targets specified and no makefile found. Stop.
(venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# make unix
make: *** No rule to make target 'unix'. Stop.
(venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# make mlc-chat-Llama-2-7b-chat-hf-q4f16_1
make: Nothing to be done for 'mlc-chat-Llama-2-7b-chat-hf-q4f16_1'.
(venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# cd ..
(venv) root@debian:~/.config/io.datasette.llm/mlc/dist# cd
(venv) root@debian:~# ls
'^^' dbd go nuclei nuclei-templates ParamSpider reconftw Tools wget-log
(venv) root@debian:~# llm --help
Usage: llm [OPTIONS] COMMAND [ARGS]...
Access large language models from the command-line
To get started, obtain an OpenAI key and set it like this:
$ llm keys set openai
Enter key: ...
Then execute a prompt like this:
llm 'Five outrageous names for a pet pelican'
Options:
--version Show the version and exit.
--help Show this message and exit.
Commands:
prompt* Execute a prompt
aliases Manage model aliases
chat Hold an ongoing chat with a model.
collections View and manage collections of embeddings
embed Embed text and store or return the result
embed-models Manage available embedding models
embed-multi Store embeddings for multiple strings at once
install Install packages from PyPI into the same environment as LLM
keys Manage stored API keys for different models
logs Tools for exploring logged prompts and responses
mlc Commands for managing MLC models
models Manage available models
openai Commands for working directly with the OpenAI API
plugins List installed plugins
similar Return top N similar IDs from a collection
templates Manage stored prompt templates
uninstall Uninstall Python packages from the LLM environment
(venv) root@debian:~# llm chat
Chatting with gpt-3.5-turbo
Type 'exit' or 'quit' to exit
Type '!multi' to enter multiple lines, then '!end' to finish
hi
Traceback (most recent call last):
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_requestor.py", line 413, in handle_error_response
error_data = resp["error"]
TypeError: string indices must be integers, not 'str'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user1/llm/path/to/venv/bin/llm", line 8, in <module>
sys.exit(cli())
^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/llm/cli.py", line 438, in chat
for chunk in response:
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/llm/models.py", line 91, in __iter__
for chunk in self.model.execute(
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/llm/default_plugins/openai_models.py", line 274, in execute
completion = openai.ChatCompletion.create(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_resources/chat_completion.py", line 25, in create
return super().create(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_resources/abstract/engine_apiresource.py", line 155, in create
response, _, api_key = requestor.request(
^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_requestor.py", line 299, in request
resp, got_stream = self._interpret_response(result, stream)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_requestor.py", line 710, in _interpret_response
self._interpret_response_line(
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_requestor.py", line 775, in _interpret_response_line
raise self.handle_error_response(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_requestor.py", line 415, in handle_error_response
raise error.APIError(
openai.error.APIError: Invalid response object from API: '400 Bad Request' (HTTP response code was 400)
(venv) root@debian:~# llm -m Llama-2-7b-chat 'five names for a cute pet ferret'
Error: You must install mlc_chat first. See https://github.com/simonw/llm-mlc for instructions.
root@debian:~/.config# ls amass go io.datasette.llm notify nuclei solana subfinder uncover (venv) root@debian:~/.config# cd io.datasette.llm (venv) root@debian:~/.config/io.datasette.llm# ls aliases.json keys.json logs.db mlc (venv) root@debian:~/.config/io.datasette.llm# ls -lra total 68 drwxr-xr-x 3 root root 4096 Oct 27 22:34 mlc -rw-r--r-- 1 root root 45056 Oct 27 22:41 logs.db -rw-r--r-- 1 root root 149 Oct 27 22:29 keys.json -rw-r--r-- 1 root root 175 Oct 27 22:39 aliases.json drwxr-xr-x 10 root root 4096 Oct 27 22:28 .. drwxr-xr-x 3 root root 4096 Oct 27 22:41 . (venv) root@debian:~/.config/io.datasette.llm# cd mlc (venv) root@debian:~/.config/io.datasette.llm/mlc# ls dist (venv) root@debian:~/.config/io.datasette.llm/mlc# cd dist (venv) root@debian:~/.config/io.datasette.llm/mlc/dist# ls prebuilt (venv) root@debian:~/.config/io.datasette.llm/mlc/dist# cd prebuilt (venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# ls lib mlc-chat-Llama-2-13b-chat-hf-q4f16_1 mlc-chat-Llama-2-7b-chat-hf-q4f16_1 mlc-chat-WizardLM-13B-V1.2-q4f16_1 (venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# make install make: No rule to make target 'install'. Stop. (venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# make make: No targets specified and no makefile found. Stop. (venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# make unix make: *** No rule to make target 'unix'. Stop. (venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# make mlc-chat-Llama-2-7b-chat-hf-q4f16_1 make: Nothing to be done for 'mlc-chat-Llama-2-7b-chat-hf-q4f161'. (venv) root@debian:~/.config/io.datasette.llm/mlc/dist/prebuilt# cd .. (venv) root@debian:~/.config/io.datasette.llm/mlc/dist# cd (venv) root@debian:~# ls '^^' dbd go nuclei nuclei-templates ParamSpider reconftw Tools wget-log (venv) root@debian:~# llm --help Usage: llm [OPTIONS] COMMAND [ARGS]...
Access large language models from the command-line
Documentation: https://llm.datasette.io/
To get started, obtain an OpenAI key and set it like this:
Then execute a prompt like this:
Options: --version Show the version and exit. --help Show this message and exit.
Commands: prompt* Execute a prompt aliases Manage model aliases chat Hold an ongoing chat with a model. collections View and manage collections of embeddings embed Embed text and store or return the result embed-models Manage available embedding models embed-multi Store embeddings for multiple strings at once install Install packages from PyPI into the same environment as LLM keys Manage stored API keys for different models logs Tools for exploring logged prompts and responses mlc Commands for managing MLC models models Manage available models openai Commands for working directly with the OpenAI API plugins List installed plugins similar Return top N similar IDs from a collection templates Manage stored prompt templates uninstall Uninstall Python packages from the LLM environment (venv) root@debian:~# llm chat Chatting with gpt-3.5-turbo Type 'exit' or 'quit' to exit Type '!multi' to enter multiple lines, then '!end' to finish
During handling of the above exception, another exception occurred:
Traceback (most recent call last): File "/home/user1/llm/path/to/venv/bin/llm", line 8, in <module>
sys.exit(cli())
^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/llm/cli.py", line 438, in chat
for chunk in response:
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/llm/models.py", line 91, in __iter__
for chunk in self.model.execute(
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/llm/default_plugins/openai_models.py", line 274, in execute
completion = openai.ChatCompletion.create(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_resources/chat_completion.py", line 25, in create
return super().create(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_resources/abstract/engine_apiresource.py", line 155, in create
response, _, api_key = requestor.request(
^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_requestor.py", line 299, in request
resp, got_stream = self._interpret_response(result, stream)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_requestor.py", line 710, in _interpret_response
self._interpret_response_line(
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_requestor.py", line 775, in _interpret_response_line
raise self.handle_error_response(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/user1/llm/path/to/venv/lib/python3.11/site-packages/openai/api_requestor.py", line 415, in handle_error_response
raise error.APIError(
openai.error.APIError: Invalid response object from API: '400 Bad Request' (HTTP response code was 400)
(venv) root@debian:~# llm -m Llama-2-7b-chat 'five names for a cute pet ferret'
Error: You must install mlc_chat first. See https://github.com/simonw/llm-mlc for instructions.