import spaces
import os
import io
import torch
import gradio as gr
import requests
from diffusers import DiffusionPipeline

# =========================================================
# CONFIG
# =========================================================
SCRIPTS_REPO_API = (
    "https://api.github.com/repos/asomoza/diffusers-recipes/contents/"
    "models/z-image/scripts"
)
LOCAL_SCRIPTS_DIR = "z_image_scripts"
MODEL_ID = "Tongyi-MAI/Z-Image-Turbo"

os.makedirs(LOCAL_SCRIPTS_DIR, exist_ok=True)

# =========================================================
# GLOBAL STATE (CPU SAFE)
# =========================================================
SCRIPT_CODE = {}  # script_name -> code (CPU only)
PIPELINES = {}  # script_name -> pipeline (GPU only, lazy)

log_buffer = io.StringIO()

# =========================================================
# LOGGING
# =========================================================
def log(msg):
    print(msg)
    log_buffer.write(msg + "\n")


def pipeline_debug_info(pipe):
    return f"""
Pipeline Info
-------------
Device: {pipe.device}
Transformer: {pipe.transformer.__class__.__name__}
VAE: {pipe.vae.__class__.__name__}
"""


def latent_shape_info(height, width, pipe):
    h = height // pipe.vae_scale_factor
    w = width // pipe.vae_scale_factor
    return f"Expected latent size: ({h}, {w})"


# =========================================================
# DOWNLOAD SCRIPTS (CPU ONLY)
# =========================================================
def download_scripts():
    resp = requests.get(SCRIPTS_REPO_API)
    resp.raise_for_status()

    scripts = []
    for item in resp.json():
        if item["name"].endswith(".py"):
            scripts.append(item["name"])
            path = os.path.join(LOCAL_SCRIPTS_DIR, item["name"])
            if not os.path.exists(path):
                content = requests.get(item["download_url"]).text
                with open(path, "w") as f:
                    f.write(content)
    return sorted(scripts)


SCRIPT_NAMES = download_scripts()


# =========================================================
# REGISTER SELECTED SCRIPTS (NO CUDA)
# =========================================================
def register_scripts(selected_scripts):
    SCRIPT_CODE.clear()
    for name in selected_scripts:
        path = os.path.join(LOCAL_SCRIPTS_DIR, name)
        with open(path, "r") as f:
            SCRIPT_CODE[name] = f.read()
    return f"{len(SCRIPT_CODE)} script(s) registered ✅"


# =========================================================
# GPU-ONLY PIPELINE BUILDER (CRITICAL)
# =========================================================
def get_pipeline(script_name):
    if script_name in PIPELINES:
        return PIPELINES[script_name]

    log(f"🔧 Building pipeline from {script_name}")

    namespace = {
        "__file__": script_name,
        "__name__": "__main__",
        # Minimal required globals
        "torch": torch,
    }

    try:
        exec(SCRIPT_CODE[script_name], namespace)
    except Exception as e:
        log(f"❌ Script failed: {script_name}")
        raise RuntimeError(f"Pipeline build failed for {script_name}") from e

    if "pipe" not in namespace:
        raise RuntimeError(
            f"{script_name} did not define `pipe`.\n"
            f"Each script MUST assign a variable named `pipe`."
        )

    PIPELINES[script_name] = namespace["pipe"]
    log(f"✅ Pipeline ready: {script_name}")
    return PIPELINES[script_name]
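
# ---------------------------------------------------------
# For reference: a minimal sketch of the contract enforced by
# get_pipeline(). The real recipe scripts live in the
# diffusers-recipes repo and may differ; the dtype below is an
# illustrative assumption, not something this app prescribes.
# The only hard requirement is that the script assigns a
# variable named `pipe`:
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "Tongyi-MAI/Z-Image-Turbo",  # MODEL_ID above
#       torch_dtype=torch.bfloat16,  # assumption
#   )
#
# Note that only `torch` is injected into the exec() namespace;
# everything else must be imported by the script itself.
# ---------------------------------------------------------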
# =========================================================
# IMAGE GENERATION (LOGIC UNCHANGED)
# =========================================================
@spaces.GPU
def generate_image(
    prompt,
    height,
    width,
    num_inference_steps,
    seed,
    randomize_seed,
    num_images,
    pipeline_name,
):
    log_buffer.truncate(0)
    log_buffer.seek(0)

    if pipeline_name not in SCRIPT_CODE:
        raise RuntimeError("Pipeline not registered")

    pipe = get_pipeline(pipeline_name)

    # ✅ Correct, universal, ZeroGPU-safe
    if not hasattr(pipe, "hf_device_map"):
        pipe = pipe.to("cuda")

    log("=== NEW GENERATION REQUEST ===")
    log(f"Pipeline: {pipeline_name}")
    log(f"Prompt: {prompt}")
    log(f"Height: {height}, Width: {width}")
    log(f"Steps: {num_inference_steps}")
    log(f"Images: {num_images}")

    if randomize_seed:
        seed = torch.randint(0, 2**32 - 1, (1,)).item()
        log(f"Random Seed → {seed}")
    else:
        log(f"Seed → {seed}")

    num_images = min(max(1, int(num_images)), 3)
    generator = torch.Generator("cuda").manual_seed(int(seed))

    result = pipe(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=0.0,
        generator=generator,
        max_sequence_length=1024,
        num_images_per_prompt=num_images,
        output_type="pil",
    )

    try:
        log(pipeline_debug_info(pipe))
        log(latent_shape_info(height, width, pipe))
    except Exception as e:
        log(f"Diagnostics error: {e}")

    log("Generation complete ✅")
    return result.images, seed, log_buffer.getvalue()


# =========================================================
# GRADIO UI
# =========================================================
with gr.Blocks(title="Z-Image Turbo – ZeroGPU") as demo:
    gr.Markdown("## ⚡ Z-Image Turbo (Script-Driven · ZeroGPU Safe)")

    script_selector = gr.CheckboxGroup(
        choices=SCRIPT_NAMES,
        label="Select pipeline scripts",
    )
    register_btn = gr.Button("Register Scripts")
    status = gr.Textbox(label="Status", interactive=False)

    register_btn.click(
        register_scripts,
        inputs=[script_selector],
        outputs=[status],
    )

    pipeline_picker = gr.Dropdown(
        choices=[],
        label="Active Pipeline",
    )

    register_btn.click(
        lambda s: gr.update(choices=s, value=s[0] if s else None),
        inputs=[script_selector],
        outputs=[pipeline_picker],
    )

    gr.Markdown("---")

    prompt = gr.Textbox(label="Prompt", lines=3)
    height = gr.Slider(256, 1024, 512, step=64, label="Height")
    width = gr.Slider(256, 1024, 512, step=64, label="Width")
    steps = gr.Slider(1, 8, 4, step=1, label="Inference Steps")
    images = gr.Slider(1, 3, 1, step=1, label="Images")
    seed = gr.Number(value=0, label="Seed")
    random_seed = gr.Checkbox(value=True, label="Randomize Seed")

    run_btn = gr.Button("Generate")
    gallery = gr.Gallery(columns=3)
    used_seed = gr.Number(label="Used Seed")
    logs = gr.Textbox(lines=12, label="Logs")

    run_btn.click(
        generate_image,
        inputs=[
            prompt,
            height,
            width,
            steps,
            seed,
            random_seed,
            images,
            pipeline_picker,
        ],
        outputs=[gallery, used_seed, logs],
    )

demo.queue()
demo.launch()
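
# ---------------------------------------------------------
# Local smoke test (sketch, not part of the Space). Assumes a
# CUDA machine; outside a ZeroGPU Space the `spaces.GPU`
# decorator is documented to be a no-op, so `generate_image`
# can be called directly (instead of launching the UI) once a
# script has been registered:
#
#   register_scripts(SCRIPT_NAMES[:1])
#   imgs, used, logs = generate_image(
#       prompt="a red fox in the snow",
#       height=512, width=512,
#       num_inference_steps=4,
#       seed=0, randomize_seed=False,
#       num_images=1,
#       pipeline_name=SCRIPT_NAMES[0],
#   )
# ---------------------------------------------------------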