Q:
ValueError: No valid profile found. Please go to the TensorRT tab and generate an engine with the necessary profile. If using hires.fix, you need an engine for both the base and upscaled resolutions. Otherwise, use the default (torch) U-Net.
Will upscale the image depending on the selected target size type
', 512, 0, 8, 32, 64, 0.35, 32, 0, True, 0, False, 8, 0, 0, 2048, 2048, 2, 'NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\nALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\nINS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\nIND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0\nINALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0\nMIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\nOUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0\nOUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\nOUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1\nALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5', True, 0, 'values', '0,0.25,0.5,0.75,1', 'Block ID', 'IN05-OUT05', 'none', '', '0.5,1', 'BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11', 1.0, 'black', '20', False, 'ATTNDEEPON:IN05-OUT05:attn:1\n\nATTNDEEPOFF:IN05-OUT05:attn:0\n\nPROJDEEPOFF:IN05-OUT05:proj:0\n\nXYZ:::1', False, False) {}
Traceback (most recent call last):
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\img2img.py", line 208, in img2img
processed = process_images(p)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\processing.py", line 732, in process_images
res = process_images_inner(p)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\processing.py", line 867, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\processing.py", line 1528, in sample
samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_samplers_kdiffusion.py", line 188, in sample_img2img
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_samplers_common.py", line 261, in launch_sampling
return func()
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_samplers_kdiffusion.py", line 188, in
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_samplers_cfg_denoiser.py", line 169, in forward
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\multidiffusion-upscaler-for-automatic1111\tile_utils\utils.py", line 249, in wrapper
return fn(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\multidiffusion-upscaler-for-automatic1111\tile_methods\mixtureofdiffusers.py", line 119, in apply_model_hijack
x_tile_out = shared.sd_model.apply_model_original_md(x_tile, t_tile, c_tile)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_hijack_utils.py", line 17, in
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_hijack_utils.py", line 28, in __call__
return self.__orig_func(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
out = self.diffusion_model(x, t, context=cc)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_unet.py", line 89, in UNetModel_forward
return current_unet.forward(x, timesteps, context, *args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\Stable-Diffusion-WebUI-TensorRT\scripts\trt.py", line 87, in forward
self.switch_engine(feed_dict)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\Stable-Diffusion-WebUI-TensorRT\scripts\trt.py", line 108, in switch_engine
raise ValueError(
ValueError: No valid profile found. Please go to the TensorRT tab and generate an engine with the necessary profile. If using hires.fix, you need an engine for both the base and upscaled resolutions. Otherwise, use the default (torch) U-Net.
提示:Python 运行时抛出了一个异常。请检查疑难解答页面。
---
So, Stable-Diffusion-WebUI-TensorRT can't be used for img2img? (This was without adding ControlNet.)
I love playing with this plugin — have a goooood day~
Oh, it's late here at UTC+8:00 — maybe I should go to sleep, but I love computers and AI.
When I was young, maybe 13 years old, I dreamed an AI bot met me — aaaaaa! It said, "I'm coming, and I've missed you for so long, my friend. In the future, you will see the new sign of the times." (I don't know whether "sign of the times" is used correctly. Haaa..) I told my friends, family, and strangers: a new day is coming! But... they said that couldn't change their damn day-to-day lives.
I felt dizzy when I heard that.
I believe: today can change tomorrow, and tomorrow can change yesterday.
Singing English songs and using English makes me feel like I'm flying.
I think it's interesting.
Written with a translation tool, but typed on the keyboard myself.
This is too much — I hope it doesn't make you busier.
Life is unpredictable; I hope you can have a life you like.
Love you, the guide of the times.
Night owls are not afraid of sudden death~
When you see this: good morning! (maybe)
The mole.... (Zzzzz)
Q: ValueError: No valid profile found. Please go to the TensorRT tab and generate an engine with the necessary profile. If using hires.fix, you need an engine for both the base and upscaled resolutions. Otherwise, use the default (torch) U-Net.
Details
--- [Tiled Diffusion] upscaling image with R-ESRGAN 4x+ Anime6B... Tile 1/20 Tile 2/20 Tile 3/20 Tile 4/20 Tile 5/20 Tile 6/20 Tile 7/20 Tile 8/20 Tile 9/20 Tile 10/20 Tile 11/20 Tile 12/20 Tile 13/20 Tile 14/20 Tile 15/20 Tile 16/20 Tile 17/20 Tile 18/20 Tile 19/20 Tile 20/20 [Tiled Diffusion] ControlNet found, support is enabled. Mixture of Diffusers hooked into 'DPM++ 2M Karras' sampler, Tile size: 16x16, Tile count: 140, Batch size: 1, Tile batches: 140 (ext: ContrlNet) *** Error completing request *** Arguments: ('task(j6hyv133d77d5e4)', 0, '', '', [],, None, None, None, None, None, None, 20, 'DPM++ 2M Karras', 4, 0, 1, 1, 1, 7, 1.5, 0.4, 0, 512, 512, 1, 0, 0, 32, 0, '', '', '', [], False, [], '', , 0, False, '', 0.8, -1, False, -1, 0, 0, 0, True, 'Mixture of Diffusers', False, True, 1024, 1024, 16, 16, 0, 1, 'R-ESRGAN 4x+ Anime6B', 2, False, 1, 1, 0, 2, False, False, False, False, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 2048, 128, True, True, True, False, UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), 
UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', save_detected_map=True), 'NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\nALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\nINS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\nIND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0\nINALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0\nMIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\nOUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0\nOUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\nOUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1\nALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5', True, 0, 'values', '0,0.25,0.5,0.75,1', 'Block ID', 'IN05-OUT05', 'none', '', '0.5,1', 'BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11', 1.0, 'black', '20', False, 'ATTNDEEPON:IN05-OUT05:attn:1\n\nATTNDEEPOFF:IN05-OUT05:attn:0\n\nPROJDEEPOFF:IN05-OUT05:proj:0\n\nXYZ:::1', False, False, False, False, 'Matrix', 'Columns', 'Mask', 'Prompt', '1,1', '0.2', False, False, False, 'Attention', [False], '0', '0', '0.4', None, '0', '0', False, False, False, 0, None, [], 0, False, [], [], False, 0, 1, False, False, 0, None, [], -2, False, [], False, 0, None, None, '* `CFG Scale` should be 2 or lower.', True, True, '', '', True, 50, True, 1, 0, False, 4, 0.5, 'Linear', 'None', '
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_samplers_cfg_denoiser.py", line 169, in forward
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\multidiffusion-upscaler-for-automatic1111\tile_utils\utils.py", line 249, in wrapper
return fn(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\multidiffusion-upscaler-for-automatic1111\tile_methods\mixtureofdiffusers.py", line 119, in apply_model_hijack
x_tile_out = shared.sd_model.apply_model_original_md(x_tile, t_tile, c_tile)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_hijack_utils.py", line 17, in
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_hijack_utils.py", line 28, in __call__
return self.__orig_func(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
out = self.diffusion_model(x, t, context=cc)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_unet.py", line 89, in UNetModel_forward
return current_unet.forward(x, timesteps, context, *args, **kwargs)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\Stable-Diffusion-WebUI-TensorRT\scripts\trt.py", line 87, in forward
self.switch_engine(feed_dict)
File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\Stable-Diffusion-WebUI-TensorRT\scripts\trt.py", line 108, in switch_engine
raise ValueError(
ValueError: No valid profile found. Please go to the TensorRT tab and generate an engine with the necessary profile. If using hires.fix, you need an engine for both the base and upscaled resolutions. Otherwise, use the default (torch) U-Net.
提示:Python 运行时抛出了一个异常。请检查疑难解答页面。
---
Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
', 128, 8, ['left', 'right', 'up', 'down'], 1, 0.05, 128, 4, 0, ['left', 'right', 'up', 'down'], False, False, 'positive', 'comma', 0, False, False, '', 'Will upscale the image by the selected scale factor; use width and height sliders to set tile size
', 64, 0, 2, 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50, [], 30, '', 4, [], 1, '', '', '', '', 'Will upscale the image depending on the selected target size type
', 512, 0, 8, 32, 64, 0.35, 32, 0, True, 0, False, 8, 0, 0, 2048, 2048, 2, 'NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\nALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\nINS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\nIND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0\nINALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0\nMIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\nOUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0\nOUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\nOUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1\nALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5', True, 0, 'values', '0,0.25,0.5,0.75,1', 'Block ID', 'IN05-OUT05', 'none', '', '0.5,1', 'BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11', 1.0, 'black', '20', False, 'ATTNDEEPON:IN05-OUT05:attn:1\n\nATTNDEEPOFF:IN05-OUT05:attn:0\n\nPROJDEEPOFF:IN05-OUT05:proj:0\n\nXYZ:::1', False, False) {} Traceback (most recent call last): File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\call_queue.py", line 57, in f res = list(func(*args, **kwargs)) File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\call_queue.py", line 36, in f res = func(*args, **kwargs) File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\img2img.py", line 208, in img2img processed = process_images(p) File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\processing.py", line 732, in process_images res = process_images_inner(p) File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs) File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\processing.py", line 867, in process_images_inner samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) File 
"E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\processing.py", line 1528, in sample samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning) File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_samplers_kdiffusion.py", line 188, in sample_img2img samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_samplers_common.py", line 261, in launch_sampling return func() File "E:\sd4.4\sd-webui-aki-v4.4\sd-webui-aki-v4.4\modules\sd_samplers_kdiffusion.py", line 188, inSo,Stable-Diffusion-WebUI-TensorRT can`t use for img2img? Not add ControlNet. I love play this plugin,have gooooood day~
Oh, it's late here at UTC+8:00 — maybe I should go to sleep, but I love computers and AI. When I was young, maybe 13 years old, I dreamed an AI bot met me — aaaaaa! It said, "I'm coming, and I've missed you for so long, my friend. In the future, you will see the new sign of the times." (I don't know whether "sign of the times" is used correctly. Haaa..) I told my friends, family, and strangers: a new day is coming! But... they said that couldn't change their damn day-to-day lives. I felt dizzy when I heard that. I believe: today can change tomorrow, and tomorrow can change yesterday. Singing English songs and using English makes me feel like I'm flying. I think it's interesting. Written with a translation tool, but typed on the keyboard myself. This is too much — I hope it doesn't make you busier. Life is unpredictable; I hope you can have a life you like. Love you, the guide of the times. Night owls are not afraid of sudden death~ When you see this: good morning! (maybe) The mole.... (Zzzzz)