chenfei-wu / TaskMatrix


AssertionError: Torch not compiled with CUDA enabled #307

Open · masoningithub opened this issue 1 year ago

masoningithub commented 1 year ago

Traceback (most recent call last):

D:\Python\visual-chatgpt\visual_chatgpt.py:1051 in <module>
    1048   parser.add_argument('--load', type=str, default="ImageCaptioning_cuda:0,Text2Image_cuda:0")
    1049   args = parser.parse_args()
    1050   load_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.load.split(',')}
  ❱ 1051   bot = ConversationBot(load_dict=load_dict)

D:\Python\visual-chatgpt\visual_chatgpt.py:987 in __init__
     986   for class_name, device in load_dict.items():
  ❱  987       self.models[class_name] = globals()[class_name](device=device)

D:\Python\visual-chatgpt\visual_chatgpt.py:313 in __init__
     311   self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
     312   self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
  ❱  313   self.model = BlipForConditionalGeneration.from_pretrained(
     314       "Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype).to(self.device)

F:\Programs\miniconda3\envs\visgpt\lib\site-packages\transformers\modeling_utils.py:1811 in to
  ❱ 1811   return super().to(*args, **kwargs)

F:\Programs\miniconda3\envs\visgpt\lib\site-packages\torch\nn\modules\module.py:927 in to
  ❱  927   return self._apply(convert)

F:\Programs\miniconda3\envs\visgpt\lib\site-packages\torch\nn\modules\module.py:579 in _apply  (three recursive frames)
  ❱  579   module._apply(fn)

F:\Programs\miniconda3\envs\visgpt\lib\site-packages\torch\nn\modules\module.py:602 in _apply
     601   with torch.no_grad():
  ❱  602       param_applied = fn(param)

F:\Programs\miniconda3\envs\visgpt\lib\site-packages\torch\nn\modules\module.py:925 in convert
  ❱  925   return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)

F:\Programs\miniconda3\envs\visgpt\lib\site-packages\torch\cuda\__init__.py:211 in _lazy_init
     210   if not hasattr(torch._C, '_cuda_getDeviceCount'):
  ❱  211       raise AssertionError("Torch not compiled with CUDA enabled")

AssertionError: Torch not compiled with CUDA enabled
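The assertion is raised in torch's lazy CUDA initialization: the installed torch wheel is a CPU-only build, so the .to(self.device) call at visual_chatgpt.py:313-314 (device cuda:0) cannot succeed. A quick way to confirm which build is installed (a generic diagnostic, not specific to this repo):

python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
# a version suffix like '+cpu' together with 'False' confirms a CPU-only wheel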

silvere commented 1 year ago

Install the NVIDIA CUDA Toolkit first, then reinstall PyTorch with CUDA enabled. Works fine for me.
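For example, after installing the CUDA Toolkit, the reinstall could look like this (a sketch; cu117/11.7 is an assumed version, match it to your driver):

pip uninstall -y torch torchvision
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu117

or, in a conda env like the one in the traceback:

conda install pytorch torchvision pytorch-cuda=11.7 -c pytorch -c nvidia

Afterwards, torch.cuda.is_available() should return True.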

chat1q2w3e4r5t commented 1 year ago

1. Quick start

Advice for CPU Users

python visual_chatgpt.py --load ImageCaptioning_cpu,Text2Image_cpu

Advice for 1 Tesla T4 15GB (Google Colab); this is the default

python visual_chatgpt.py --load "ImageCaptioning_cuda:0,Text2Image_cuda:0"

2. Change the default value for CPU users

parser.add_argument('--load', type=str, default="ImageCaptioning_cuda:0,Text2Image_cuda:0")

# for CPU users
parser.add_argument('--load', type=str, default="ImageCaptioning_cpu,Text2Image_cpu")

Changing the default is the simplest method, but then CUDA cannot be used.