Open hylarucoder opened 1 year ago
First sample of the hidden "prompt" object. Conditions: uses LoRA, Upscale Image, and an original custom node ("Send Webp Image to Eagle").
{
"3": {
"inputs": {
"seed": 804645408561758,
"steps": 40,
"cfg": 1.5,
"sampler_name": "dpmpp_2m_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"47",
0
],
"positive": [
"32",
0
],
"negative": [
"33",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler"
},
"4": {
"inputs": {
"ckpt_name": "agelesnate_v121.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"5": {
"inputs": {
"width": 768,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"34",
0
]
},
"class_type": "VAEDecode"
},
"17": {
"inputs": {
"seed": 804645408561758,
"steps": 20,
"cfg": 7,
"sampler_name": "dpmpp_2m_sde",
"scheduler": "karras",
"denoise": 0.5,
"model": [
"47",
0
],
"positive": [
"32",
0
],
"negative": [
"33",
0
],
"latent_image": [
"25",
0
]
},
"class_type": "KSampler"
},
"21": {
"inputs": {
"samples": [
"17",
0
],
"vae": [
"34",
0
]
},
"class_type": "VAEDecode"
},
"24": {
"inputs": {
"upscale_model": [
"26",
0
],
"image": [
"8",
0
]
},
"class_type": "ImageUpscaleWithModel"
},
"25": {
"inputs": {
"pixels": [
"27",
0
],
"vae": [
"34",
0
]
},
"class_type": "VAEEncode"
},
"26": {
"inputs": {
"model_name": "RealESRGAN_x4plus_anime_6B.pth"
},
"class_type": "UpscaleModelLoader"
},
"27": {
"inputs": {
"upscale_method": "area",
"scale_by": 0.5,
"image": [
"24",
0
]
},
"class_type": "ImageScaleBy"
},
"32": {
"inputs": {
"text": "1girl, solo, expressionless, silver hair, ponytail, purple eyes, face mask, ninja, blue bikini, medium breats, cleavage, scarf,",
"clip": [
"47",
1
]
},
"class_type": "CLIPTextEncode"
},
"33": {
"inputs": {
"text": "embeddings:EasyNegative, (worst quality, low quality, normal quality, bad anatomy:1.4), text, watermark,",
"clip": [
"47",
1
]
},
"class_type": "CLIPTextEncode"
},
"34": {
"inputs": {
"vae_name": "vae-ft-mse-840000-ema-pruned.safetensors"
},
"class_type": "VAELoader"
},
"47": {
"inputs": {
"lora_name": "oc_v10.safetensors",
"strength_model": 0.5,
"strength_clip": 0.5,
"model": [
"4",
0
],
"clip": [
"4",
1
]
},
"class_type": "LoraLoader"
},
"71": {
"inputs": {
"images": [
"21",
0
]
},
"class_type": "PreviewImage"
},
"91": {
"inputs": {
"lossless_webp": "lossy",
"compression": 80,
"positive_prompt": "1girl, solo, expressionless, silver hair, ponytail, purple eyes, face mask, ninja, blue bikini, medium breats, cleavage, scarf,",
"negative_prompt": "embeddings:EasyNegative, (worst quality, low quality, normal quality, bad anatomy:1.4), text, watermark,",
"annotation": "",
"images": [
"21",
0
]
},
"class_type": "Send Webp Image to Eagle"
}
}
Second sample of the hidden "prompt" object. Conditions: uses SDXL Base & Refiner and an original custom node ("Send Webp Image to Eagle").
{
"3": {
"inputs": {
"seed": 234288240797552,
"steps": 40,
"cfg": 8,
"sampler_name": "dpmpp_2m_sde",
"scheduler": "karras",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"32",
0
],
"negative": [
"33",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler"
},
"4": {
"inputs": {
"ckpt_name": "sdXL_v10VAEFix.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"5": {
"inputs": {
"width": 832,
"height": 1216,
"batch_size": 1
},
"class_type": "EmptyLatentImage"
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"34",
0
]
},
"class_type": "VAEDecode"
},
"12": {
"inputs": {
"ascore": 6,
"width": 2048,
"height": 2048,
"text": "photo of beautiful age 18 girl, pastel hair, freckles sexy, beautiful, close up, young, dslr, 8k, 4k, ultrarealistic, realistic, natural skin, textured skin",
"clip": [
"30",
1
]
},
"class_type": "CLIPTextEncodeSDXLRefiner"
},
"16": {
"inputs": {
"ascore": 6,
"width": 2048,
"height": 2048,
"text": "prompt: text, watermark, low quality, medium quality, blurry, censored, wrinkles, deformed, mutated text, watermark, low quality, medium quality, blurry, censored, wrinkles, deformed, mutated",
"clip": [
"30",
1
]
},
"class_type": "CLIPTextEncodeSDXLRefiner"
},
"17": {
"inputs": {
"seed": 0,
"steps": 20,
"cfg": 8,
"sampler_name": "euler_ancestral",
"scheduler": "normal",
"denoise": 0.3000000000000001,
"model": [
"30",
0
],
"positive": [
"12",
0
],
"negative": [
"16",
0
],
"latent_image": [
"3",
0
]
},
"class_type": "KSampler"
},
"21": {
"inputs": {
"samples": [
"17",
0
],
"vae": [
"34",
0
]
},
"class_type": "VAEDecode"
},
"28": {
"inputs": {
"filename_prefix": "xl_output",
"images": [
"8",
0
]
},
"class_type": "SaveImage"
},
"30": {
"inputs": {
"ckpt_name": "sd_xl_refiner_1.0.safetensors"
},
"class_type": "CheckpointLoaderSimple"
},
"32": {
"inputs": {
"text": "photo of beautiful age 18 girl, pastel hair, freckles sexy, beautiful, close up, young, dslr, 8k, 4k, ultrarealistic, realistic, natural skin, textured skin",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"33": {
"inputs": {
"text": "prompt: text, watermark, low quality, medium quality, blurry, censored, wrinkles, deformed, mutated text, watermark, low quality, medium quality, blurry, censored, wrinkles, deformed, mutated",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode"
},
"34": {
"inputs": {
"vae_name": "sdxl_vae.safetensors"
},
"class_type": "VAELoader"
},
"35": {
"inputs": {
"lossless_webp": "lossy",
"compression": 80,
"positive_prompt": "",
"negative_prompt": "",
"annotation": "",
"images": [
"21",
0
]
},
"class_type": "Send Webp Image to Eagle"
}
}
I believe the following points make it difficult to analyze the generation information held by the hidden "prompt" object:
First, there are two "CLIPTextEncode" nodes, each possessing a "text" attribute. The only way to distinguish whether a given text is the prompt or the negative prompt is to check whether it is connected to the "positive" or the "negative" input of a "KSampler". Second, if there are multiple "KSampler" nodes in the workflow, it is unclear whether each "KSampler"'s prompt is used for generation, for refinement, or for upscaling. To determine this accurately, one needs to comprehend the entire structure of the workflow and discern the purpose of that particular "KSampler".
I created a sample method (with ChatGPT's help) to parse the prompts. Conditions: the workflow contains exactly two "CLIPTextEncode" nodes, and the same prompt (and negative prompt) must be fed to every "KSampler" in the workflow.
@classmethod
def classify_text_from_json(cls, prompt):
    """Extract the positive and negative prompt texts from a ComfyUI
    hidden "prompt" workflow dict.

    Walks every "KSampler" node, follows its "positive"/"negative"
    input links (each link is a ``[node_id, output_index]`` pair), and
    returns the "text" of the linked "CLIPTextEncode" node.

    Args:
        prompt: dict (not a JSON string) mapping node-id strings to
            node dicts, each carrying "class_type" and "inputs".

    Returns:
        dict with up to two keys, "Prompt" and "Negative". A key is
        absent when no KSampler link resolves to a CLIPTextEncode
        node. If several KSamplers share conditioning, the last one
        visited wins (per the issue's stated condition, all KSamplers
        receive the same prompt texts).
    """
    # Nodes whose "text" attribute we can classify. Use .get() so a
    # node dict missing "class_type" does not raise.
    clip_text_encode_items = {
        k: v for k, v in prompt.items()
        if v.get("class_type") == "CLIPTextEncode"
    }
    # Collect the node ids each KSampler uses as positive / negative
    # conditioning sources.
    positive_keys = []
    negative_keys = []
    for item in prompt.values():
        if item.get("class_type") != "KSampler":
            continue
        inputs = item.get("inputs", {})
        if "positive" in inputs:
            positive_keys.append(inputs["positive"][0])
        if "negative" in inputs:
            negative_keys.append(inputs["negative"][0])
    # Resolve links to text, skipping links that do not point at a
    # CLIPTextEncode node (e.g. "CLIPTextEncodeSDXLRefiner" in the
    # SDXL base+refiner sample). The original direct indexing raised
    # KeyError on exactly that workflow.
    output = {}
    for key in positive_keys:
        node = clip_text_encode_items.get(key)
        if node is not None:
            output["Prompt"] = node["inputs"]["text"]
    for key in negative_keys:
        node = clip_text_encode_items.get(key)
        if node is not None:
            output["Negative"] = node["inputs"]["text"]
    return output
Usage:
gen_prompt = EaglePngInfo.classify_text_from_json(prompt)
print(gen_prompt["Prompt"])
print(gen_prompt["Negative"])