K1Z3M1112 committed
Commit ff40a4f · verified · 1 Parent(s): d189ef8

Update app.py

Files changed (1)
  1. app.py +276 -95
app.py CHANGED
@@ -10,6 +10,7 @@ dtype = torch.float16 if torch.cuda.is_available() else torch.float32

 # Lazy import (to avoid long startup if unused)
 from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, StableDiffusionPipeline
+from diffusers import StableDiffusionInstructPix2PixPipeline
 from controlnet_aux import LineartDetector, LineartAnimeDetector

 # ===== Model & Config =====
@@ -18,6 +19,8 @@ LINEART_DETECTOR = None
 LINEART_ANIME_DETECTOR = None
 CURRENT_T2I_PIPE = None
 CURRENT_T2I_MODEL = None
+CURRENT_PIX2PIX_PIPE = None
+CURRENT_PIX2PIX_MODEL = None

 def get_pipeline(model_name: str, anime_model: bool = False):
     """Get or create a ControlNet pipeline for the given model and anime flag"""
@@ -28,60 +31,108 @@ def get_pipeline(model_name: str, anime_model: bool = False):

     print(f"Loading ControlNet pipeline for model: {model_name}, anime: {anime_model}")

-    # Load the appropriate ControlNet
-    if anime_model:
-        controlnet = ControlNetModel.from_pretrained(
-            "lllyasviel/control_v11p_sd15s2_lineart_anime",
-            torch_dtype=dtype
-        ).to(device)
-    else:
-        controlnet = ControlNetModel.from_pretrained(
-            "lllyasviel/control_v11p_sd15_lineart",
-            torch_dtype=dtype
-        ).to(device)
-
-    # Create the pipeline
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        model_name,
-        controlnet=controlnet,
-        torch_dtype=dtype,
-        safety_checker=None,
-        requires_safety_checker=False
-    ).to(device)
-
-    pipe.enable_attention_slicing()
-    if device.type == "cuda":
-        pipe.enable_model_cpu_offload()
-
-    # Cache the pipeline
-    PIPELINES[key] = pipe
-    return pipe
+    try:
+        # Load the appropriate ControlNet
+        if anime_model:
+            controlnet = ControlNetModel.from_pretrained(
+                "lllyasviel/control_v11p_sd15s2_lineart_anime",
+                torch_dtype=dtype
+            ).to(device)
+        else:
+            controlnet = ControlNetModel.from_pretrained(
+                "lllyasviel/control_v11p_sd15_lineart",
+                torch_dtype=dtype
+            ).to(device)
+
+        # Create the pipeline
+        pipe = StableDiffusionControlNetPipeline.from_pretrained(
+            model_name,
+            controlnet=controlnet,
+            torch_dtype=dtype,
+            safety_checker=None,
+            requires_safety_checker=False
+        ).to(device)
+
+        pipe.enable_attention_slicing()
+        if device.type == "cuda":
+            pipe.enable_model_cpu_offload()
+
+        # Cache the pipeline
+        PIPELINES[key] = pipe
+        return pipe
+
+    except Exception as e:
+        print(f"Error loading ControlNet pipeline: {e}")
+        raise

 def load_lineart_detectors():
     """Load lineart detectors if not already loaded"""
     global LINEART_DETECTOR, LINEART_ANIME_DETECTOR
     if LINEART_DETECTOR is None:
         print("Loading lineart detectors...")
-        LINEART_DETECTOR = LineartDetector.from_pretrained("lllyasviel/Annotators")
-        LINEART_ANIME_DETECTOR = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
+        try:
+            LINEART_DETECTOR = LineartDetector.from_pretrained("lllyasviel/Annotators")
+            LINEART_ANIME_DETECTOR = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
+        except Exception as e:
+            print(f"Error loading lineart detectors: {e}")
+            raise

 def load_t2i_model(model_name: str):
     global CURRENT_T2I_PIPE, CURRENT_T2I_MODEL
-    if CURRENT_T2I_MODEL == model_name and CURRENT_T2I_PIPE is not None:
-        return
-    if CURRENT_T2I_PIPE is not None:
-        del CURRENT_T2I_PIPE
-        gc.collect()
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-    print(f"Loading T2I model: {model_name}")
-    CURRENT_T2I_PIPE = StableDiffusionPipeline.from_pretrained(
-        model_name, torch_dtype=dtype, safety_checker=None, requires_safety_checker=False
-    ).to(device)
-    CURRENT_T2I_PIPE.enable_attention_slicing()
-    if device.type == "cuda":
-        CURRENT_T2I_PIPE.enable_model_cpu_offload()
-    CURRENT_T2I_MODEL = model_name
+    try:
+        if CURRENT_T2I_MODEL == model_name and CURRENT_T2I_PIPE is not None:
+            return
+        if CURRENT_T2I_PIPE is not None:
+            del CURRENT_T2I_PIPE
+            CURRENT_T2I_PIPE = None
+            gc.collect()
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+
+        print(f"Loading T2I model: {model_name}")
+        CURRENT_T2I_PIPE = StableDiffusionPipeline.from_pretrained(
+            model_name, torch_dtype=dtype, safety_checker=None, requires_safety_checker=False
+        ).to(device)
+        CURRENT_T2I_PIPE.enable_attention_slicing()
+        if device.type == "cuda":
+            CURRENT_T2I_PIPE.enable_model_cpu_offload()
+        CURRENT_T2I_MODEL = model_name
+
+    except Exception as e:
+        print(f"Error loading T2I model {model_name}: {e}")
+        # Reset state if loading fails
+        CURRENT_T2I_PIPE = None
+        CURRENT_T2I_MODEL = None
+        raise
+
+def load_pix2pix_model():
+    """Load Instruct-Pix2Pix model for image editing"""
+    global CURRENT_PIX2PIX_PIPE, CURRENT_PIX2PIX_MODEL
+
+    if CURRENT_PIX2PIX_PIPE is not None:
+        return CURRENT_PIX2PIX_PIPE
+
+    try:
+        print("Loading Instruct-Pix2Pix model...")
+        CURRENT_PIX2PIX_PIPE = StableDiffusionInstructPix2PixPipeline.from_pretrained(
+            "timbrooks/instruct-pix2pix",
+            torch_dtype=dtype,
+            safety_checker=None,
+            requires_safety_checker=False
+        ).to(device)
+
+        CURRENT_PIX2PIX_PIPE.enable_attention_slicing()
+        if device.type == "cuda":
+            CURRENT_PIX2PIX_PIPE.enable_model_cpu_offload()
+
+        CURRENT_PIX2PIX_MODEL = "timbrooks/instruct-pix2pix"
+        return CURRENT_PIX2PIX_PIPE
+
+    except Exception as e:
+        print(f"Error loading Instruct-Pix2Pix model: {e}")
+        CURRENT_PIX2PIX_PIPE = None
+        CURRENT_PIX2PIX_MODEL = None
+        raise

 # ===== Utils =====
 def is_lineart(img: Image.Image) -> bool:
@@ -97,76 +148,158 @@ def extract_lineart(img, anime: bool = False):
     out = detector(img, detect_resolution=512, image_resolution=512)
     return Image.fromarray(out) if isinstance(out, np.ndarray) else out

+def resize_image(image, max_size=512):
+    """Resize image while maintaining aspect ratio"""
+    width, height = image.size
+    if max(width, height) > max_size:
+        ratio = max_size / max(width, height)
+        new_width = int(width * ratio)
+        new_height = int(height * ratio)
+        return image.resize((new_width, new_height), Image.LANCZOS)
+    return image
+
 # ===== Functions =====
 def colorize(sketch, base_model, anime_model, prompt, seed, steps, scale, cn_weight):
-    # Load the appropriate pipeline
-    pipe = get_pipeline(base_model, anime_model)
-
-    # Extract the lineart
-    lineart = extract_lineart(sketch, anime_model)
-
-    # Generate the image
-    gen = torch.Generator(device=device).manual_seed(int(seed))
-    out = pipe(
-        prompt,
-        image=lineart,
-        num_inference_steps=int(steps),
-        guidance_scale=float(scale),
-        controlnet_conditioning_scale=float(cn_weight),
-        generator=gen
-    ).images[0]
-
-    return out, lineart
+    try:
+        # Load the appropriate pipeline
+        pipe = get_pipeline(base_model, anime_model)
+
+        # Extract the lineart
+        lineart = extract_lineart(sketch, anime_model)
+
+        # Generate the image
+        gen = torch.Generator(device=device).manual_seed(int(seed))
+        out = pipe(
+            prompt,
+            image=lineart,
+            num_inference_steps=int(steps),
+            guidance_scale=float(scale),
+            controlnet_conditioning_scale=float(cn_weight),
+            generator=gen
+        ).images[0]
+
+        return out, lineart
+    except Exception as e:
+        print(f"Error in colorize: {e}")
+        # Return placeholder images and report the error
+        error_img = Image.new('RGB', (512, 512), color='red')
+        error_text = f"Error: {str(e)[:50]}..."
+        return error_img, Image.new('RGB', (512, 512), color='gray')

 def t2i(prompt, model, seed, steps, scale, w, h):
-    load_t2i_model(model)
-    gen = torch.Generator(device=device).manual_seed(int(seed))
-    return CURRENT_T2I_PIPE(
-        prompt,
-        width=int(w),
-        height=int(h),
-        num_inference_steps=int(steps),
-        guidance_scale=float(scale),
-        generator=gen
-    ).images[0]
+    try:
+        load_t2i_model(model)
+        gen = torch.Generator(device=device).manual_seed(int(seed))
+        return CURRENT_T2I_PIPE(
+            prompt,
+            width=int(w),
+            height=int(h),
+            num_inference_steps=int(steps),
+            guidance_scale=float(scale),
+            generator=gen
+        ).images[0]
+    except Exception as e:
+        print(f"Error in t2i: {e}")
+        # Return a placeholder image and report the error
+        error_img = Image.new('RGB', (int(w), int(h)), color='red')
+        return error_img
+
+def pix2pix_edit(image, instruction, seed, steps, scale, image_scale):
+    """Edit image using Instruct-Pix2Pix"""
+    try:
+        # Load the model
+        pipe = load_pix2pix_model()
+
+        # Resize the image
+        image = resize_image(image, max_size=768)
+
+        # Create the generator
+        gen = torch.Generator(device=device).manual_seed(int(seed))
+
+        # Edit the image
+        result = pipe(
+            instruction,
+            image=image,
+            num_inference_steps=int(steps),
+            guidance_scale=float(scale),
+            image_guidance_scale=float(image_scale),
+            generator=gen
+        ).images[0]
+
+        return result
+
+    except Exception as e:
+        print(f"Error in pix2pix_edit: {e}")
+        # Return an error image
+        if image:
+            error_img = Image.new('RGB', image.size, color='red')
+        else:
+            error_img = Image.new('RGB', (512, 512), color='red')
+        return error_img

 # ===== Function to unload all models =====
 def unload_all_models():
     global PIPELINES, LINEART_DETECTOR, LINEART_ANIME_DETECTOR
     global CURRENT_T2I_PIPE, CURRENT_T2I_MODEL
+    global CURRENT_PIX2PIX_PIPE, CURRENT_PIX2PIX_MODEL

     print("Unloading all models from memory...")

     # Unload ControlNet pipelines
     for key, pipe in list(PIPELINES.items()):
-        del pipe
+        try:
+            del pipe
+        except:
+            pass
     PIPELINES.clear()

     # Unload lineart detectors
-    if LINEART_DETECTOR is not None:
-        del LINEART_DETECTOR
-        LINEART_DETECTOR = None
-    if LINEART_ANIME_DETECTOR is not None:
-        del LINEART_ANIME_DETECTOR
-        LINEART_ANIME_DETECTOR = None
+    try:
+        if LINEART_DETECTOR is not None:
+            del LINEART_DETECTOR
+            LINEART_DETECTOR = None
+    except:
+        pass
+
+    try:
+        if LINEART_ANIME_DETECTOR is not None:
+            del LINEART_ANIME_DETECTOR
+            LINEART_ANIME_DETECTOR = None
+    except:
+        pass

     # Unload T2I model
-    if CURRENT_T2I_PIPE is not None:
-        del CURRENT_T2I_PIPE
-        CURRENT_T2I_PIPE = None
+    try:
+        if CURRENT_T2I_PIPE is not None:
+            del CURRENT_T2I_PIPE
+            CURRENT_T2I_PIPE = None
+    except:
+        pass
+
     CURRENT_T2I_MODEL = None

+    # Unload Pix2Pix model
+    try:
+        if CURRENT_PIX2PIX_PIPE is not None:
+            del CURRENT_PIX2PIX_PIPE
+            CURRENT_PIX2PIX_PIPE = None
+    except:
+        pass
+
+    CURRENT_PIX2PIX_MODEL = None
+
     # Force garbage collection
     gc.collect()
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
-        print(f"GPU memory cleared. Allocated: {torch.cuda.memory_allocated()/1024**3:.2f} GB")
+        allocated = torch.cuda.memory_allocated() / 1024**3
+        print(f"GPU memory cleared. Allocated: {allocated:.2f} GB")

     return "✅ All models unloaded from memory!"

 # ===== Gradio UI =====
 with gr.Blocks() as demo:
-    gr.Markdown("# 🎨 Minimal Style2Paints")
+    gr.Markdown("# 🎨 Advanced Image Generation & Editing Suite")

     # Add unload button at the top
     with gr.Row():
@@ -174,7 +307,7 @@ with gr.Blocks() as demo:
         status_text = gr.Textbox(label="Status", interactive=False)
     unload_btn.click(unload_all_models, outputs=status_text)

-    with gr.Tab("🎨 Colorize"):
+    with gr.Tab("🎨 Colorize Sketch"):
         with gr.Row():
             inp = gr.Image(label="Input Sketch/Image", type="pil")
             out = gr.Image(label="Colored Output")
@@ -183,18 +316,16 @@
            sketch_out = gr.Image(label="Detected Lineart", type="pil")

        with gr.Row():
-            # Dropdown for selecting the base model
            base_model = gr.Dropdown(
                choices=[
-                    "admruul/anything-v3.0",  # changed to admruul/anything-v3.0
+                    "admruul/anything-v3.0",
                    "digiplay/ChikMix_V3",
                    "digiplay/chilloutmix_NiPrunedFp16Fix",
                    "runwayml/stable-diffusion-v1-5",
                    "stabilityai/stable-diffusion-2-1",
-                    "andite/anything-v4.0",  # additional option
-                    "gsdf/Counterfeit-V2.5"  # additional option
+                    "gsdf/Counterfeit-V2.5"
                ],
-                value="admruul/anything-v3.0",  # default changed to admruul/anything-v3.0
+                value="admruul/anything-v3.0",
                label="Base Model"
            )
            anime_chk = gr.Checkbox(label="Use Anime ControlNet", value=True)
@@ -227,15 +358,14 @@ with gr.Blocks() as demo:
            t2i_prompt = gr.Textbox(label="Prompt", lines=3)
            t2i_model = gr.Dropdown(
                choices=[
-                    "admruul/anything-v3.0",  # changed to admruul/anything-v3.0
+                    "admruul/anything-v3.0",
                    "digiplay/ChikMix_V3",
                    "digiplay/chilloutmix_NiPrunedFp16Fix",
                    "runwayml/stable-diffusion-v1-5",
                    "stabilityai/stable-diffusion-2-1",
-                    "andite/anything-v4.0",  # additional option
-                    "gsdf/Counterfeit-V2.5"  # additional option
+                    "gsdf/Counterfeit-V2.5"
                ],
-                value="admruul/anything-v3.0",  # default changed to admruul/anything-v3.0
+                value="admruul/anything-v3.0",
                label="Model"
            )

@@ -254,5 +384,56 @@ with gr.Blocks() as demo:
            [t2i_prompt, t2i_model, t2i_seed, t2i_steps, t2i_scale, w, h],
            t2i_out
        )
+
+    with gr.Tab("🔄 Instruct-Pix2Pix"):
+        gr.Markdown("### Edit Images with Text Instructions")
+        gr.Markdown("Example instructions: 'make it winter', 'turn day into night', 'add sunglasses', 'make it look like a painting'")
+
+        with gr.Row():
+            with gr.Column():
+                pix2pix_input = gr.Image(label="Input Image", type="pil")
+                pix2pix_instruction = gr.Textbox(
+                    label="Edit Instruction",
+                    placeholder="e.g., make it winter, turn day into night, add sunglasses...",
+                    lines=2
+                )
+
+                with gr.Row():
+                    pix2pix_seed = gr.Number(value=42, label="Seed")
+                    pix2pix_steps = gr.Slider(10, 100, 50, step=5, label="Steps")
+
+                with gr.Row():
+                    pix2pix_scale = gr.Slider(1, 20, 7.5, step=0.5, label="Text Guidance Scale")
+                    pix2pix_image_scale = gr.Slider(1, 5, 1.5, step=0.1, label="Image Guidance Scale")
+
+                pix2pix_btn = gr.Button("🔄 Edit Image", variant="primary")
+
+            with gr.Column():
+                pix2pix_output = gr.Image(label="Edited Image", type="pil")
+
+        # Common example instructions
+        with gr.Row():
+            gr.Examples(
+                examples=[
+                    ["make it winter", 42, 50, 7.5, 1.5],
+                    ["turn day into night", 42, 50, 7.5, 1.5],
+                    ["make it look like a painting", 42, 50, 7.5, 1.5],
+                    ["add sunglasses", 42, 50, 7.5, 1.5],
+                    ["make it cyberpunk style", 42, 50, 7.5, 1.5],
+                    ["change hair color to blue", 42, 50, 7.5, 1.5],
+                ],
+                inputs=[pix2pix_instruction, pix2pix_seed, pix2pix_steps, pix2pix_scale, pix2pix_image_scale],
+                label="Quick Examples"
+            )
+
+        pix2pix_btn.click(
+            pix2pix_edit,
+            [pix2pix_input, pix2pix_instruction, pix2pix_seed, pix2pix_steps, pix2pix_scale, pix2pix_image_scale],
+            pix2pix_output
+        )

-demo.launch()
+# Add error handling around launch
+try:
+    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
+except Exception as e:
+    print(f"Error launching Gradio app: {e}")
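The main addition in this commit is the Instruct-Pix2Pix editing path (load_pix2pix_model, pix2pix_edit, and the new UI tab). The snippet below is a minimal standalone sketch of that same call path, assuming the model id, pipeline class, and call parameters shown in the diff; the file names, prompt, and concrete values are illustrative and not part of the commit.

# Minimal standalone sketch of the Instruct-Pix2Pix path added above.
# Mirrors pix2pix_edit(): same model id and __call__ arguments.
# "input.png", "edited.png", and the instruction are illustrative only.
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if torch.cuda.is_available() else torch.float32

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix",
    torch_dtype=dtype,
    safety_checker=None,
    requires_safety_checker=False,
).to(device)

image = Image.open("input.png").convert("RGB")  # illustrative input image
generator = torch.Generator(device=device).manual_seed(42)

edited = pipe(
    "make it winter",                  # text instruction
    image=image,
    num_inference_steps=50,
    guidance_scale=7.5,                # weight of the text instruction
    image_guidance_scale=1.5,          # how closely to follow the input image
    generator=generator,
).images[0]
edited.save("edited.png")

The two sliders in the new tab map directly onto these arguments: "Text Guidance Scale" is guidance_scale and "Image Guidance Scale" is image_guidance_scale, where higher image guidance keeps the edit closer to the original photo.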