enzyme69 / blendersushi

Blender Sushi related scripts. Mostly about Sverchok, Geometry Nodes, Animation Nodes, and related Python scripts.

Blender Talk to ComfyUI #1349

enzyme69 opened this issue 11 months ago

enzyme69 commented 11 months ago
# The SDXL Turbo model is used here, but an LCM LoRA was also fast

import bpy
import json
import requests
import random

# ComfyUI server address
comfyui_address = "http://127.0.0.1:8188"

def queue_prompt(prompt_workflow):
    p = {"prompt": prompt_workflow}

    # Convert prompt workflow to JSON
    workflow_json = json.dumps(p).encode('utf-8')

    # Send POST request to ComfyUI server
    response = requests.post(comfyui_address + "/prompt", data=workflow_json)

    # Check if request was successful
    if response.status_code != 200:
        print("Error sending prompt to ComfyUI:", response.status_code)
        print(response.text)
    else:
        print("Prompt successfully sent to ComfyUI")

# Read workflow API data from a file and convert it into a dictionary
with open("/Users/jimmygunawan/Downloads/inter_combine_workflow6.json") as workflow_file:
    prompt_workflow = json.load(workflow_file)
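# Note: a workflow in this API format can typically be exported from ComfyUI by
# enabling "Dev mode Options" in the settings and using the "Save (API Format)"
# button; the regular saved workflow JSON uses a different layout.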

# Create a list of prompts

#prompt_list = []

# Access a Blender text datablock and append its lines as prompts for ComfyUI
# Get the text datablock (.get() returns None instead of raising if the name is missing)
#text_datablock = bpy.data.texts.get('MonkeyRobot')

#if text_datablock:
    # Read the contents of the text block, one prompt per line
    #for line in text_datablock.lines:
        #print(line.body)
        #prompt_list.append(line.body)
#else:
    #print("Text block 'MonkeyRobot' not found.")

#prompt_list = []
#prompt_list.append("photo of a bearded man Mario wearing a red hat sitting alone in a cafe")
#prompt_list.append("oil painting of a llama standing in the middle of a busy street")
#prompt_list.append("water colour art of a fish sitting in a tree")
#prompt_list.append("beautiful mushroom glass bottle landscape, purple galaxy bottle")
#prompt_list.append("princess peach on abandoned castle reading book at night, fireflies")
#prompt_list.append("head totem of monkey head, made of mecha garbage recycle, in the middle of empty park")

# Give some easy-to-remember names to the nodes
load_image_node1 = prompt_workflow["48"]
load_image_node2 = prompt_workflow["49"]
prompt_pos_node = prompt_workflow["6"]
#seed_node = prompt_workflow["4"]
#save_image_node = prompt_workflow["1"]
#empty_latent_img_node = prompt_workflow["1"]

# Set image dimensions and batch size in EmptyLatentImage node
#empty_latent_img_node["inputs"]["width"] = 512
#empty_latent_img_node["inputs"]["height"] = 640
#empty_latent_img_node["inputs"]["batch_size"] = 1

# Step through the first 166 frames, advancing both image-sequence loaders by one frame per iteration
for i in range(166):
    load_image_node1["inputs"]["skip_first_frames"] = i
    load_image_node2["inputs"]["skip_first_frames"] = i

    # Assign the positive text prompt
    prompt_pos_node["inputs"]["text"] = "(solo) cyborg woman (fox ears:1.1), (blonde hair:1.0), messy hair, sky clouds, standing in a grass field, (chibi), blue eyes"

    queue_prompt(prompt_workflow)
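    # Possible pacing (a sketch): if queue_prompt were adapted to return the
    # prompt_id from the /prompt response, wait_for_prompt(prompt_id) could be
    # called here so each frame finishes before the next one is queued, instead
    # of submitting all 166 jobs at once.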

#for index, prompt in enumerate(prompt_list):
    # Set the text prompt for positive CLIPTextEncode node
    #prompt_pos_node["inputs"]["text"] = prompt + " made of garbage, waste recycles, wabisabi, imperfect"

    # Set a random seed in KSampler node
    #seed_node["inputs"]["noise_seed"] = random.randint(1, 18446744073709551614)

    # Set filename prefix to be the same as prompt
    # (truncate to first 100 chars if necessary)
    #fileprefix = prompt
    #if len(fileprefix) > 100:
    #    fileprefix = fileprefix[:100]

    #save_image_node["inputs"]["filename_prefix"] = fileprefix

    # Everything set, add entire workflow to queue.
    #queue_prompt(prompt_workflow)
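
# Possible extension (a sketch, not part of the original script): pull a finished
# render back into Blender over HTTP. This assumes the /history/<prompt_id>
# response lists output images with "filename", "subfolder" and "type" fields,
# and that ComfyUI's /view endpoint serves the saved files. The function name
# and the save path are just examples.
def fetch_first_output(prompt_id, save_path="/tmp/comfyui_result.png"):
    history = requests.get(comfyui_address + "/history/" + prompt_id).json()
    if prompt_id not in history:
        return None
    outputs = history[prompt_id]["outputs"]
    for node_id, node_output in outputs.items():
        for image in node_output.get("images", []):
            params = {
                "filename": image["filename"],
                "subfolder": image["subfolder"],
                "type": image["type"],
            }
            image_bytes = requests.get(comfyui_address + "/view", params=params).content
            with open(save_path, "wb") as f:
                f.write(image_bytes)
            # Load the downloaded file as a Blender image datablock
            return bpy.data.images.load(save_path)
    return None
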
ttodosi commented 11 months ago

Can you provide inter_combine_workflow6.json too? I've got the two talking, I'm just not so good at figuring out what they are saying yet.