Closed bismark211 closed 1 year ago
can someone explain what's wrong?
it's probably your settings file. Please upload it here.
now generation has stopped on dpm++2m
Traceback (most recent call last): File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 126, in feed_token action, arg = states[state][token.type] KeyError: '$END'
During handling of the above exception, another exception occurred:
Traceback (most recent call last): File "C:\stable-diffusion-webui\modules\call_queue.py", line 56, in f res = list(func(*args, **kwargs)) File "C:\stable-diffusion-webui\modules\call_queue.py", line 37, in f res = func(*args, **kwargs) File "C:\stable-diffusion-webui\extensions\deforum-for-automatic1111-webui\scripts\deforum.py", line 85, in run_deforum render_animation(args, anim_args, video_args, parseq_args, loop_args, root.animation_prompts, root) File "C:\stable-diffusion-webui/extensions/deforum-for-automatic1111-webui/scripts\deforum_helpers\render.py", line 339, in render_animation image = generate(args, anim_args, loop_args, root, frame_idx, sampler_name=scheduled_sampler_name) File "C:\stable-diffusion-webui/extensions/deforum-for-automatic1111-webui/scripts\deforum_helpers\generate.py", line 197, in generate processed = processing.process_images(p) File "C:\stable-diffusion-webui\modules\processing.py", line 485, in process_images res = process_images_inner(p) File "C:\stable-diffusion-webui\modules\processing.py", line 617, in process_images_inner c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) File "C:\stable-diffusion-webui\modules\processing.py", line 571, in get_conds_with_caching cache[1] = function(shared.sd_model, required_prompts, steps) File "C:\stable-diffusion-webui\modules\prompt_parser.py", line 205, in get_multicond_learned_conditioning learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps) File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\lib_prompt_fusion\hijacker.py", line 15, in wrapper return function(*args, **kwargs, original_function=self.__original_functions[attribute]) File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\scripts\promptlang.py", line 25, in _hijacked_get_learned_conditioning tensor_builders = _parse_tensor_builders(prompts, total_steps) File 
"C:\stable-diffusion-webui\extensions\prompt-fusion-extension\scripts\promptlang.py", line 41, in _parse_tensor_builders expr = parse_prompt(prompt) File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\lib_prompt_fusion\prompt_parser.py", line 130, in parse_prompt return parse_expression(prompt.lstrip()) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\lark.py", line 625, in parse return self.parser.parse(text, start=start, on_error=on_error) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parser_frontends.py", line 96, in parse return self.parser.parse(stream, chosen_start, **kw) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 41, in parse return self.parser.parse(lexer, start) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 171, in parse return self.parse_from_state(parser_state) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 188, in parse_from_state raise e File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 182, in parse_from_state return state.feed_token(end_token, True) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 129, in feed_token raise UnexpectedToken(token, expected, state=self, interactive_parser=None) lark.exceptions.UnexpectedToken: Unexpected token Token('$END', '') at line 1, column 222. Expected one of:
это, вероятно, ваш файл настроек. Пожалуйста, загрузите его сюда.
"samples_save": true,
"samples_format": "png",
"samples_filename_pattern": "",
"save_images_add_number": true,
"grid_save": false,
"grid_format": "png",
"grid_extended_filename": false,
"grid_only_if_multiple": true,
"grid_prevent_empty_spots": false,
"n_rows": -1,
"enable_pnginfo": false,
"save_txt": true,
"save_images_before_face_restoration": false,
"save_images_before_highres_fix": false,
"save_images_before_color_correction": false,
"jpeg_quality": 80,
"export_for_4chan": false,
"use_original_name_batch": false,
"use_upscaler_name_as_suffix": false,
"save_selected_only": true,
"do_not_add_watermark": true,
"temp_dir": "",
"clean_temp_dir_at_start": false,
"outdir_samples": "",
"outdir_txt2img_samples": "outputs/txt2img-images",
"outdir_img2img_samples": "outputs/img2img-images",
"outdir_extras_samples": "outputs/extras-images",
"outdir_grids": "",
"outdir_txt2img_grids": "outputs/txt2img-grids",
"outdir_img2img_grids": "outputs/img2img-grids",
"outdir_save": "log/images",
"save_to_dirs": false,
"grid_save_to_dirs": false,
"use_save_to_dirs_for_ui": false,
"directories_filename_pattern": "",
"directories_max_prompt_words": 8,
"ESRGAN_tile": 192,
"ESRGAN_tile_overlap": 8,
"realesrgan_enabled_models": [
"R-ESRGAN 4x+",
"R-ESRGAN General 4xV3",
"R-ESRGAN General WDN 4xV3"
],
"upscaler_for_img2img": null,
"face_restoration_model": "GFPGAN",
"code_former_weight": 0.5,
"face_restoration_unload": false,
"show_warnings": false,
"memmon_poll_rate": 8,
"samples_log_stdout": false,
"multiple_tqdm": true,
"print_hypernet_extra": false,
"unload_models_when_training": false,
"pin_memory": false,
"save_optimizer_state": true,
"save_training_settings_to_txt": true,
"dataset_filename_word_regex": "",
"dataset_filename_join_string": " ",
"training_image_repeats_per_epoch": 1,
"training_write_csv_every": 500,
"training_xattention_optimizations": false,
"training_enable_tensorboard": false,
"training_tensorboard_save_images": false,
"training_tensorboard_flush_every": 120,
"sd_model_checkpoint": "realisticVisionV12_v12.safetensors [8194f84cdc]",
"sd_checkpoint_cache": 10,
"sd_vae_checkpoint_cache": 10,
"sd_vae": "v1-5-pruned-emaonly.vae.pt",
"sd_vae_as_default": true,
"inpainting_mask_weight": 1.0,
"initial_noise_multiplier": 1.0,
"img2img_color_correction": false,
"img2img_fix_steps": false,
"img2img_background_color": "#ffffff",
"enable_quantization": true,
"enable_emphasis": true,
"enable_batch_seeds": true,
"comma_padding_backtrack": 20,
"CLIP_stop_at_last_layers": 1,
"upcast_attn": true,
"use_old_emphasis_implementation": true,
"use_old_karras_scheduler_sigmas": false,
"use_old_hires_fix_width_height": false,
"interrogate_keep_models_in_memory": false,
"interrogate_return_ranks": false,
"interrogate_clip_num_beams": 1,
"interrogate_clip_min_length": 24,
"interrogate_clip_max_length": 240,
"interrogate_clip_dict_limit": 0.0,
"interrogate_clip_skip_categories": [],
"interrogate_deepbooru_score_threshold": 0.7262276,
"deepbooru_sort_alpha": true,
"deepbooru_use_spaces": false,
"deepbooru_escape": true,
"deepbooru_filter_tags": "",
"extra_networks_default_view": "cards",
"extra_networks_default_multiplier": 1.0,
"sd_hypernetwork": "",
"return_grid": true,
"do_not_show_images": false,
"add_model_hash_to_info": true,
"add_model_name_to_info": true,
"disable_weights_auto_swap": false,
"send_seed": true,
"send_size": true,
"font": "",
"js_modal_lightbox": true,
"js_modal_lightbox_initially_zoomed": true,
"show_progress_in_title": true,
"samplers_in_dropdown": true,
"dimensions_and_batch_together": true,
"keyedit_precision_attention": 0.1,
"keyedit_precision_extra": 0.05,
"quicksettings": "sd_model_checkpoint",
"ui_reorder": "inpaint, sampler, dimensions, cfg, seed, checkboxes, hires_fix, batch, scripts",
"ui_extra_networks_tab_reorder": "",
"localization": "None",
"show_progressbar": true,
"live_previews_enable": false,
"show_progress_grid": false,
"show_progress_every_n_steps": 10,
"show_progress_type": "Approx NN",
"live_preview_content": "Prompt",
"live_preview_refresh_period": 1000,
"hide_samplers": [],
"eta_ddim": 1,
"eta_ancestral": 1.0,
"ddim_discretize": "quad",
"s_churn": 0.0,
"s_tmin": 0.0,
"s_noise": 1.0,
"eta_noise_seed_delta": 0,
"always_discard_next_to_last_sigma": false,
"postprocessing_enable_in_main_ui": [],
"postprocessing_operation_order": [],
"upscaling_max_images_in_cache": 5,
"disabled_extensions": [],
"sd_checkpoint_hash": "8194f84cdce2d9d782c4b1d32ddc4c585819aa270bb4cb60e9eb3710c6a38ff3",
"ldsr_steps": 100,
"ldsr_cached": false,
"SWIN_tile": 192,
"SWIN_tile_overlap": 8,
"sd_lora": "None",
"lora_apply_to_outputs": false,
"depthmap_script_boost_rmax": 1600.0,
"depthmap_script_sbsflip": false,
"outdir_ip2p_samples": "outputs/ip2p-images",
"animatoranon_film_folder": "C:/AI/frame_interpolation/film.bat",
"animatoranon_prop_folder": "c:/ai/props",
"animatoranon_output_folder": "",
"dd_save_previews": false,
"outdir_ddetailer_previews": "extensions/ddetailer/outputs/masks-previews",
"dd_save_masks": false,
"outdir_ddetailer_masks": "extensions/ddetailer/outputs/masks",
"dp_ignore_whitespace": false,
"dp_write_raw_template": false,
"dp_write_prompts_to_file": false,
"ais_exif_pnginfo_group": [],
"ais_windows_tag_group": [],
"ais_windows_category_group": [],
"ais_generation_params_text_group": [],
"ais_force_cpu": false,
"promptgen_names": "AUTOMATIC/promptgen-lexart, AUTOMATIC/promptgen-majinai-safe, AUTOMATIC/promptgen-majinai-unsafe",
"promptgen_device": "gpu"
}
This settings file is not what I requested. Also, what's the $END?
It's a bit hard helping that way, please upload .txt files if they are big :)
Thanks!
which file do you need to understand the problem?
after you have all of your settings filled in, click this button ^ and send the file you get from it
@bismark211 ?
I don’t want to share my settings, and I don’t think they are the cause, since the break occurs with different samplers and different step counts, and after a restart it can run to completion under the same conditions.
I have a feeling that, once installed (it hooks the on_ui_tabs method, just like my RPC extension does), it hijacks the prompt parsing handler, and something in the prompts (like math) breaks it.
From your log
File "C:\stable-diffusion-webui\modules\prompt_parser.py", line 205, in get_multicond_learned_conditioning
learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps)
File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\lib_prompt_fusion\hijacker.py", line 15, in wrapper
return function(*args, **kwargs, original_function=self.__original_functions[attribute])
File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\scripts\promptlang.py", line 25, in _hijacked_get_learned_conditioning
tensor_builders = _parse_tensor_builders(prompts, total_steps)
File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\scripts\promptlang.py", line 41, in _parse_tensor_builders
expr = parse_prompt(prompt)
File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\lib_prompt_fusion\prompt_parser.py", line 130, in parse_prompt
Is it possible for you to disable that extension while you are launching Deforum animations?
Do I need to remove the prompt-fusion-extension?
Yeah, temporarily
what do we do here?
Traceback (most recent call last): File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\lexer.py", line 528, in lex yield lexer.next_token(lexer_state, parser_state) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\lexer.py", line 466, in next_token raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column, lark.exceptions.UnexpectedCharacters: No terminal matches 's' in the current parser context, at line 1 col 28
(cyberpunk night club:1.0, speakers), volumetric lighting, elite, m ^ Expected one of:
Previous tokens: Token('COMMA', ',')
During handling of the above exception, another exception occurred:
Traceback (most recent call last): File "C:\stable-diffusion-webui\modules\call_queue.py", line 56, in f res = list(func(*args, **kwargs)) File "C:\stable-diffusion-webui\modules\call_queue.py", line 37, in f res = func(*args, **kwargs) File "C:\stable-diffusion-webui\extensions\deforum-for-automatic1111-webui\scripts\deforum.py", line 85, in run_deforum render_animation(args, anim_args, video_args, parseq_args, loop_args, root.animation_prompts, root) File "C:\stable-diffusion-webui/extensions/deforum-for-automatic1111-webui/scripts\deforum_helpers\render.py", line 339, in render_animation image = generate(args, anim_args, loop_args, root, frame_idx, sampler_name=scheduled_sampler_name) File "C:\stable-diffusion-webui/extensions/deforum-for-automatic1111-webui/scripts\deforum_helpers\generate.py", line 197, in generate processed = processing.process_images(p) File "C:\stable-diffusion-webui\modules\processing.py", line 485, in process_images res = process_images_inner(p) File "C:\stable-diffusion-webui\modules\processing.py", line 617, in process_images_inner c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) File "C:\stable-diffusion-webui\modules\processing.py", line 571, in get_conds_with_caching cache[1] = function(shared.sd_model, required_prompts, steps) File "C:\stable-diffusion-webui\modules\prompt_parser.py", line 205, in get_multicond_learned_conditioning learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps) File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\lib_prompt_fusion\hijacker.py", line 15, in wrapper return function(*args, **kwargs, original_function=self.__original_functions[attribute]) File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\scripts\promptlang.py", line 25, in _hijacked_get_learned_conditioning tensor_builders = _parse_tensor_builders(prompts, total_steps) File 
"C:\stable-diffusion-webui\extensions\prompt-fusion-extension\scripts\promptlang.py", line 41, in _parse_tensor_builders expr = parse_prompt(prompt) File "C:\stable-diffusion-webui\extensions\prompt-fusion-extension\lib_prompt_fusion\prompt_parser.py", line 130, in parse_prompt return parse_expression(prompt.lstrip()) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\lark.py", line 625, in parse return self.parser.parse(text, start=start, on_error=on_error) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parser_frontends.py", line 96, in parse return self.parser.parse(stream, chosen_start, **kw) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 41, in parse return self.parser.parse(lexer, start) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 171, in parse return self.parse_from_state(parser_state) File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 188, in parse_from_state raise e File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\parsers\lalr_parser.py", line 178, in parse_from_state for token in state.lexer.lex(state): File "C:\stable-diffusion-webui\venv\lib\site-packages\lark\lexer.py", line 537, in lex raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name) lark.exceptions.UnexpectedToken: Unexpected token Token('SYMBOL', 'speakers') at line 1, column 28. Expected one of: