Anonym0us33 opened this issue 1 year ago
It's for the image detection.
Make sure your bot permissions are set like this:
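For completeness, the portal-side permissions have a code-side counterpart: the privileged message-content intent also has to be requested when the client is constructed. A minimal discord.py 2.x sketch, assuming that's roughly how this bot is set up:

```python
import discord
from discord.ext import commands

# The message_content intent must be enabled both here and in the
# developer portal, or the bot sees empty message bodies/attachments.
intents = discord.Intents.default()
intents.message_content = True

bot = commands.Bot(command_prefix="!", intents=intents)
```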
It works, but every awaited request seems to upset it. I'm not sure whether that's because it's talking to SillyTavern instead of TavernAI, or if it's something else:
File "<userpath>\.pyenv\pyenv-win\versions\3.9.6\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "venv\lib\site-packages\discord\client.py", line 441, in _run_event
await coro(*args, **kwargs)
File "<path>\cogs\messagehandler.py", line 62, in on_message
response = await self.bot.get_cog("chatbot").chat_command(message, message.clean_content)
File "venv\lib\site-packages\discord\ext\commands\core.py", line 588, in __call__
return await self.callback(self.cog, context, *args, **kwargs) # type: ignore
File "<path>\cogs\pygbot.py", line 230, in chat_command
response = await self.chatbot.generate_response(message, message_content)
File "<path>\cogs\pygbot.py", line 184, in generate_response
response_text = conversation(input_dict)
File "venv\lib\site-packages\langchain\chains\base.py", line 252, in __call__
self._call(inputs, run_manager=run_manager)
File "venv\lib\site-packages\langchain\chains\llm.py", line 92, in _call
response = self.generate([inputs], run_manager=run_manager)
File "venv\lib\site-packages\langchain\chains\llm.py", line 102, in generate
return self.llm.generate_prompt(
File "venv\lib\site-packages\langchain\llms\base.py", line 455, in generate_prompt
return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)
File "venv\lib\site-packages\langchain\llms\base.py", line 586, in generate
output = self._generate_helper(
File "venv\lib\site-packages\langchain\llms\base.py", line 479, in _generate_helper
self._generate(
File "venv\lib\site-packages\langchain\llms\base.py", line 965, in _generate
self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
File "venv\lib\site-packages\langchain\llms\koboldai.py", line 175, in _call
response = requests.post(
File "venv\lib\site-packages\requests\api.py", line 115, in post
return request("post", url, data=data, json=json, **kwargs)
File "venv\lib\site-packages\requests\api.py", line 59, in request
return session.request(method=method, url=url, **kwargs)
File "venv\lib\site-packages\requests\sessions.py", line 589, in request
resp = self.send(prep, **send_kwargs)
File "venv\lib\site-packages\requests\sessions.py", line 703, in send
r = adapter.send(request, **kwargs)
File "venv\lib\site-packages\requests\adapters.py", line 486, in send
resp = conn.urlopen(
File "venv\lib\site-packages\urllib3\connectionpool.py", line 790, in urlopen
response = self._make_request(
File "venv\lib\site-packages\urllib3\connectionpool.py", line 536, in _make_request
response = conn.getresponse()
File "venv\lib\site-packages\urllib3\connection.py", line 461, in getresponse
httplib_response = super().getresponse()
File "<userpath>\.pyenv\pyenv-win\versions\3.9.6\lib\http\client.py", line 1349, in getresponse
response.begin()
File "<userpath>\.pyenv\pyenv-win\versions\3.9.6\lib\http\client.py", line 316, in begin
version, status, reason = self._read_status()
File "<userpath>\.pyenv\pyenv-win\versions\3.9.6\lib\http\client.py", line 277, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "<userpath>\.pyenv\pyenv-win\versions\3.9.6\lib\socket.py", line 704, in readinto
return self._sock.recv_into(b)
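The bottom of that stack is the likely culprit: LangChain's KoboldAI wrapper calls the synchronous `requests.post`, and here it runs directly on the discord.py event loop, so every generation blocks the loop (heartbeats, other awaits) until KoboldAI answers. A minimal sketch of one workaround, pushing the blocking chain call onto a worker thread; `conversation` and `input_dict` are the names from the traceback, everything else is assumed:

```python
# Sketch only: offload the synchronous LangChain call so the event loop
# keeps running while KoboldAI generates. Requires Python 3.9+ for
# asyncio.to_thread (the trace shows 3.9.6, so that's fine).
import asyncio

async def generate_response(self, message, message_content):
    input_dict = {"input": message_content}  # assumed shape of the chain input
    # conversation(...) blocks on requests.post internally; run it in a
    # worker thread instead of directly in the coroutine. self.conversation
    # is an assumed attribute name for the chain object.
    response_text = await asyncio.to_thread(self.conversation, input_dict)
    return response_text
```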
I also found the line in langchain that changes the token count, so I might make a PR for that too. It could be a command-line option like --tokens [int, 1~512], though I'm not sure whether expanded context sizes will work. Letting the bots read an entire Discord channel as context could be something to think about, since 16k context is coming to LLaMA soon.
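A hedged sketch of what that flag could look like, assuming the bot builds its LLM through LangChain's KoboldApiLLM wrapper (the traceback suggests it does) and that the wrapper's max_length field is the generation token count:

```python
import argparse

from langchain.llms.koboldai import KoboldApiLLM

parser = argparse.ArgumentParser()
parser.add_argument("--tokens", type=int, default=120, metavar="[1-512]",
                    choices=range(1, 513),
                    help="number of tokens to generate per response")
args = parser.parse_args()

# max_length is assumed to be the wrapper's generation-length setting;
# max_context_length would be the one to raise for 16k-context models.
llm = KoboldApiLLM(endpoint="http://127.0.0.1:5001", max_length=args.tokens)
```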
I set ENDPOINT="http://127.0.0.1:5001/" but it still downloads a 1B model? I'm running LLaMA 70B locally, so why does it need to download another .pt model?
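If the "image detection" reply above is the explanation, the download is unrelated to ENDPOINT: the endpoint only covers text generation, while image captioning loads its own local model. Purely illustrative, with a guessed model name:

```python
from transformers import pipeline

# The Kobold endpoint never sees images; a separate captioning model is
# fetched locally. Model name is a guess, not necessarily what the bot uses.
captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
print(captioner("attachment.png")[0]["generated_text"])
```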
It also gives:
The token looks like DISCORD_BOT_TOKEN="aaaaaaaaaaaaaaaaaaaaaaaaaa.Aa##A#.AAaaaaaaaaaaaaaa_aa_aaaa_aaaaaaaaaaaaa"
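For anyone checking their setup, a minimal sketch of how a token in that format is typically read from .env, assuming the project uses python-dotenv (names here are illustrative):

```python
import os

from dotenv import load_dotenv

load_dotenv()  # reads DISCORD_BOT_TOKEN="..." from the .env file
token = os.getenv("DISCORD_BOT_TOKEN")
if not token:
    raise SystemExit("DISCORD_BOT_TOKEN is not set in .env")
```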