rahul7star committed · verified
Commit 37700b8 · 1 Parent(s): 76523a3

Update app_lora1.py

Files changed (1)
  1. app_lora1.py +74 -62
app_lora1.py CHANGED
@@ -1,16 +1,15 @@
 import spaces
 import os
 import io
-import sys
 import torch
 
 import gradio as gr
 import requests
 from diffusers import DiffusionPipeline
 
-# =========================
+# =========================================================
 # CONFIG
-# =========================
+# =========================================================
 SCRIPTS_REPO_API = (
     "https://api.github.com/repos/asomoza/diffusers-recipes/contents/"
     "models/z-image/scripts"
@@ -20,14 +19,17 @@ MODEL_ID = "Tongyi-MAI/Z-Image-Turbo"
 
 os.makedirs(LOCAL_SCRIPTS_DIR, exist_ok=True)
 
-pipelines = {}  # script_name -> pipeline
-active_pipeline = None
+# =========================================================
+# GLOBAL STATE (CPU SAFE)
+# =========================================================
+SCRIPT_CODE = {}  # script_name -> code (CPU only)
+PIPELINES = {}  # script_name -> pipeline (GPU only, lazy)
 log_buffer = io.StringIO()
 
 
-# =========================
+# =========================================================
 # LOGGING
-# =========================
+# =========================================================
 def log(msg):
     print(msg)
     log_buffer.write(msg + "\n")
@@ -49,9 +51,9 @@ def latent_shape_info(height, width, pipe):
     return f"Expected latent size: ({h}, {w})"
 
 
-# =========================
-# DOWNLOAD SCRIPTS
-# =========================
+# =========================================================
+# DOWNLOAD SCRIPTS (CPU ONLY)
+# =========================================================
 def download_scripts():
     resp = requests.get(SCRIPTS_REPO_API)
     resp.raise_for_status()
@@ -72,49 +74,50 @@ def download_scripts():
 SCRIPT_NAMES = download_scripts()
 
 
-# =========================
-# PIPELINE BUILDER (SCRIPT-DRIVEN)
-# =========================
-def build_pipelines(selected_scripts):
-    global pipelines, active_pipeline
-
-    pipelines.clear()
-    active_pipeline = None
-
-    for script_name in selected_scripts:
-        log(f"\n=== Building pipeline from {script_name} ===")
-
-        script_path = os.path.join(LOCAL_SCRIPTS_DIR, script_name)
-
-        # Each script runs in its own namespace
-        namespace = {
-            "torch": torch,
-            "DiffusionPipeline": DiffusionPipeline,
-        }
-
-        with open(script_path, "r") as f:
-            code = f.read()
-
-        # Execute script (this must define `pipe`)
-        exec(code, namespace)
-
-        if "pipe" not in namespace:
-            raise RuntimeError(f"{script_name} did not create `pipe`")
-
-        pipelines[script_name] = namespace["pipe"]
-        log(f"Loaded pipeline from {script_name}")
-
-    # Set default active pipeline
-    if pipelines:
-        active_pipeline = pipelines[selected_scripts[0]]
-        log(f"Active pipeline → {selected_scripts[0]}")
-
-    return f"Loaded {len(pipelines)} pipeline(s) ✅"
-
-
-# =========================
-# IMAGE GENERATION (UNCHANGED)
-# =========================
+# =========================================================
+# REGISTER SELECTED SCRIPTS (NO CUDA)
+# =========================================================
+def register_scripts(selected_scripts):
+    SCRIPT_CODE.clear()
+
+    for name in selected_scripts:
+        path = os.path.join(LOCAL_SCRIPTS_DIR, name)
+        with open(path, "r") as f:
+            SCRIPT_CODE[name] = f.read()
+
+    return f"{len(SCRIPT_CODE)} script(s) registered ✅"
+
+
+# =========================================================
+# GPU-ONLY PIPELINE BUILDER (CRITICAL)
+# =========================================================
+@spaces.GPU
+def get_pipeline(script_name):
+    if script_name in PIPELINES:
+        return PIPELINES[script_name]
+
+    log(f"🔧 Building pipeline from {script_name}")
+
+    namespace = {
+        "torch": torch,
+        "DiffusionPipeline": DiffusionPipeline,
+    }
+
+    # Execute script logic (this WILL touch CUDA)
+    exec(SCRIPT_CODE[script_name], namespace)
+
+    if "pipe" not in namespace:
+        raise RuntimeError(f"{script_name} did not define `pipe`")
+
+    PIPELINES[script_name] = namespace["pipe"]
+    log(f"✅ Pipeline ready: {script_name}")
+
+    return PIPELINES[script_name]
+
+
+# =========================================================
+# IMAGE GENERATION (LOGIC UNCHANGED)
+# =========================================================
 @spaces.GPU
 def generate_image(
     prompt,
@@ -124,24 +127,28 @@ def generate_image(
     seed,
     randomize_seed,
     num_images,
-    selected_pipeline_name,
+    pipeline_name,
 ):
-    global active_pipeline
-
     log_buffer.truncate(0)
     log_buffer.seek(0)
 
-    if selected_pipeline_name not in pipelines:
-        raise RuntimeError("Pipeline not built")
+    if pipeline_name not in SCRIPT_CODE:
+        raise RuntimeError("Pipeline not registered")
 
-    pipe = pipelines[selected_pipeline_name]
+    pipe = get_pipeline(pipeline_name)
 
     log("=== NEW GENERATION REQUEST ===")
-    log(f"Pipeline: {selected_pipeline_name}")
+    log(f"Pipeline: {pipeline_name}")
     log(f"Prompt: {prompt}")
+    log(f"Height: {height}, Width: {width}")
+    log(f"Steps: {num_inference_steps}")
+    log(f"Images: {num_images}")
 
    if randomize_seed:
        seed = torch.randint(0, 2**32 - 1, (1,)).item()
+        log(f"Random Seed → {seed}")
+    else:
+        log(f"Seed → {seed}")
 
     num_images = min(max(1, int(num_images)), 3)
     generator = torch.Generator("cuda").manual_seed(int(seed))
@@ -158,28 +165,33 @@
         output_type="pil",
     )
 
-    log(pipeline_debug_info(pipe))
-    log("Done.")
+    try:
+        log(pipeline_debug_info(pipe))
+        log(latent_shape_info(height, width, pipe))
+    except Exception as e:
+        log(f"Diagnostics error: {e}")
+
+    log("Generation complete ✅")
 
     return result.images, seed, log_buffer.getvalue()
 
 
-# =========================
+# =========================================================
 # GRADIO UI
-# =========================
-with gr.Blocks(title="Z-Image Turbo – Script Pipelines") as demo:
-    gr.Markdown("## ⚡ Z-Image Turbo (Script-Driven Pipelines, ZeroGPU)")
+# =========================================================
+with gr.Blocks(title="Z-Image Turbo – ZeroGPU") as demo:
+    gr.Markdown("## ⚡ Z-Image Turbo (Script-Driven · ZeroGPU Safe)")
 
     script_selector = gr.CheckboxGroup(
         choices=SCRIPT_NAMES,
         label="Select pipeline scripts",
     )
 
-    build_btn = gr.Button("Build Pipelines")
-    status = gr.Textbox(label="Status")
+    register_btn = gr.Button("Register Scripts")
+    status = gr.Textbox(label="Status", interactive=False)
 
-    build_btn.click(
-        build_pipelines,
+    register_btn.click(
+        register_scripts,
         inputs=[script_selector],
         outputs=[status],
     )
@@ -189,7 +201,7 @@ with gr.Blocks(title="Z-Image Turbo – Script Pipelines") as demo:
         label="Active Pipeline",
     )
 
-    build_btn.click(
+    register_btn.click(
         lambda s: gr.update(choices=s, value=s[0] if s else None),
         inputs=[script_selector],
         outputs=[pipeline_picker],
@@ -200,18 +212,18 @@ with gr.Blocks(title="Z-Image Turbo – Script Pipelines") as demo:
     prompt = gr.Textbox(label="Prompt", lines=3)
     height = gr.Slider(256, 1024, 512, step=64, label="Height")
    width = gr.Slider(256, 1024, 512, step=64, label="Width")
-    steps = gr.Slider(1, 8, 4, step=1, label="Steps")
+    steps = gr.Slider(1, 8, 4, step=1, label="Inference Steps")
     images = gr.Slider(1, 3, 1, step=1, label="Images")
     seed = gr.Number(value=0, label="Seed")
     random_seed = gr.Checkbox(value=True, label="Randomize Seed")
 
-    run = gr.Button("Generate")
+    run_btn = gr.Button("Generate")
 
     gallery = gr.Gallery(columns=3)
     used_seed = gr.Number(label="Used Seed")
     logs = gr.Textbox(lines=12, label="Logs")
 
-    run.click(
+    run_btn.click(
         generate_image,
         inputs=[
             prompt,
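
For reference, the new `get_pipeline` exec's each downloaded recipe script with only `torch` and `DiffusionPipeline` injected into its namespace and then requires a `pipe` variable to exist. The snippet below is a minimal, hypothetical sketch of a script satisfying that contract, assuming the checkpoint loads through the generic `DiffusionPipeline.from_pretrained` loader with a bfloat16 dtype; the actual scripts under models/z-image/scripts in asomoza/diffusers-recipes may build the pipeline differently.

# Hypothetical recipe script (illustration only, not one of the real recipes).
# `torch` and `DiffusionPipeline` are already present in the exec namespace,
# so no imports are needed; the app only checks that `pipe` exists afterwards.
pipe = DiffusionPipeline.from_pretrained(
    "Tongyi-MAI/Z-Image-Turbo",   # MODEL_ID configured in app_lora1.py
    torch_dtype=torch.bfloat16,   # assumed dtype, purely illustrative
)
pipe = pipe.to("cuda")            # safe here: the script only runs inside @spaces.GPU

Because the exec happens inside the `@spaces.GPU`-decorated `get_pipeline`, any CUDA work a script does is confined to a ZeroGPU allocation, and the built pipeline is cached in `PIPELINES` so later generations with the same script skip the rebuild.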