Shelushun opened 8 months ago
We managed to get rid of this error by deleting the virtual environment and reinstalling everything on Python 3.10. The text-embedding error is gone, but now a different error appears when launching the model itself, even though it had not occurred before.
```
(localgpt) F:\localGPT>python run_localGPT.py
Traceback (most recent call last):
  File "F:\localGPT\run_localGPT.py", line 24, in <module>
    from load_models import (
  File "F:\localGPT\load_models.py", line 6, in <module>
    from auto_gptq import AutoGPTQForCausalLM
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\auto_gptq\__init__.py", line 4, in <module>
    from .utils.peft_utils import get_gptq_peft_model
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\auto_gptq\utils\peft_utils.py", line 9, in <module>
    from peft import get_peft_model, PeftConfig, PeftModel, PeftType
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\peft\__init__.py", line 22, in <module>
    from .auto import (
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\peft\auto.py", line 32, in <module>
    from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\peft\mapping.py", line 22, in <module>
    from .mixed_model import PeftMixedModel
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\peft\mixed_model.py", line 26, in <module>
    from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\peft\tuners\__init__.py", line 21, in <module>
    from .lora import LoraConfig, LoraModel, LoftQConfig
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\peft\tuners\lora\__init__.py", line 20, in <module>
    from .model import LoraModel
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\peft\tuners\lora\model.py", line 42, in <module>
    from .awq import dispatch_awq
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\peft\tuners\lora\awq.py", line 26, in <module>
    from awq.modules.linear import WQLinear_GEMM
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\awq\__init__.py", line 2, in <module>
    from awq.models.auto import AutoAWQForCausalLM
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\awq\models\__init__.py", line 15, in <module>
    from .mixtral import MixtralAWQForCausalLM
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\awq\models\mixtral.py", line 7, in <module>
    from awq.modules.fused.moe import FusedSparseMoeBlock
  File "C:\Users\Cash\AppData\Local\NVIDIA\MiniConda\envs\localgpt\lib\site-packages\awq\modules\fused\moe.py", line 2, in <module>
    import triton
ModuleNotFoundError: No module named 'triton'
```
I am using the IlyaGusev/saiga_mistral_7b_gguf model with the cointegrated/rubert-tiny2 embedding model.
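In case it helps with triage: as far as I can tell, triton does not publish official Windows wheels on PyPI, which is the usual reason this import chain (auto_gptq → peft → awq → triton) fails on Windows. Here is a minimal diagnostic sketch, plain Python and not part of localGPT, to confirm whether triton is importable in the active environment:

```python
# Diagnostic sketch: run inside the "localgpt" environment to check
# whether triton can be imported. If it cannot, awq's fused MoE module
# (and therefore the peft/auto_gptq import chain above) will fail.
import importlib.util

spec = importlib.util.find_spec("triton")
if spec is None:
    print("triton is not installed; 'import triton' raises ModuleNotFoundError")
else:
    import triton
    print(f"triton {triton.__version__} is importable from {spec.origin}")
```

Since the chain only reaches triton through the optional autoawq package (judging by the traceback, peft pulls in its awq integration because autoawq is installed), uninstalling autoawq may sidestep the error if you are not using AWQ-quantized models. That is an assumption based on the traceback above, not something I have verified.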