add new func to show canonical image
app.py CHANGED

@@ -218,6 +218,7 @@ def edit_with_pnp(input_video, prompt, num_steps, guidance_scale, seed, n_prompt
         ori_size = canonical_image.size
         image = processor_partial(canonical_image.resize((size_, size_)), detect_resolution=size_, image_resolution=size_)
         image = image.resize(ori_size, resample=Image.BILINEAR)
+        image.save("control_map.png")
 
         generator = torch.manual_seed(seed) if seed != -1 else None
         output_images = pipe(
@@ -228,6 +229,8 @@ def edit_with_pnp(input_video, prompt, num_steps, guidance_scale, seed, n_prompt
             negative_prompt=n_prompt,
             generator=generator
         ).images
+
+        output_images[0].save("edited_canonical_image.png")
         # output_images[0] = output_images[0].resize(ori_size, resample=Image.BILINEAR)
 
     else:
@@ -245,6 +248,7 @@ def edit_with_pnp(input_video, prompt, num_steps, guidance_scale, seed, n_prompt
         image = image[:, :, None]
         image = np.concatenate([image, image, image], axis=2)
         image = Image.fromarray(image)
+        image.save("control_map.png")
 
         generator = torch.manual_seed(seed) if seed != -1 else None
         output_images = pipe(
@@ -255,11 +259,18 @@ def edit_with_pnp(input_video, prompt, num_steps, guidance_scale, seed, n_prompt
             negative_prompt=n_prompt,
             generator=generator
         ).images
+
+        output_images[0].save("edited_canonical_image.png")
 
     edit_video_path = NaRCan_make_video(output_images[0], pth_path, frames_path)
+    edit_image_path = [
+        (image_path, "canonical image"),
+        ("control_map.png", "control map"),
+        ("edited_canonical_image.png", "edited canonical image")
+    ]
 
     # Here we return the first output image as the result
-    return edit_video_path
+    return edit_video_path, edit_image_path
 
 
 ########
@@ -273,7 +284,7 @@ intro = """
     NaRCan - <small>Natural Refined Canonical Image</small>
     </h1>
     <span>[<a target="_blank" href="https://koi953215.github.io/NaRCan_page/">Project page</a>], [<a target="_blank" href="https://huggingface.co/papers/2406.06523">Paper</a>]</span>
-    <div style="display:flex; justify-content: center;margin-top: 0.5em">
+    <div style="display:flex; justify-content: center;margin-top: 0.5em">Try selecting different control types (Canny or Lineart) in Advanced options!</div>
     </div>
 """
 
@@ -303,6 +314,10 @@ with gr.Blocks(css="style.css") as demo:
                 # placeholder="bear, Van Gogh Style"
             )
 
+
+            with gr.Row():
+                canonical_result = gr.Gallery(label="Edited Canonical Image", columns=3)
+
             with gr.Row():
                 run_button = gr.Button("Edit your video!", visible=True)
@@ -351,7 +366,7 @@ with gr.Blocks(css="style.css") as demo:
             n_prompt,
             control_type,
         ],
-        outputs = [output_video]
+        outputs = [output_video, canonical_result]
     )
 
     gr.Examples(
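For context on the wiring this change relies on: edit_with_pnp now returns two values, a video path and a list of (image, caption) tuples, and Gradio maps them positionally onto outputs=[output_video, canonical_result]; gr.Gallery accepts the tuple form and renders each caption under its image. Below is a minimal, runnable sketch of that pattern; the names fake_edit, the solid-color stand-in images, and the bare Textbox are placeholders for illustration, not code from app.py.

from PIL import Image
import gradio as gr

def fake_edit(prompt):
    # Placeholder standing in for edit_with_pnp: return a video value (None here,
    # which leaves the video component empty) plus a list of (image, caption)
    # tuples, the format gr.Gallery accepts for captioned items.
    canonical = Image.new("RGB", (256, 256), "gray")   # stand-ins for the three
    control = Image.new("RGB", (256, 256), "black")    # images app.py saves
    edited = Image.new("RGB", (256, 256), "white")
    gallery_items = [
        (canonical, "canonical image"),
        (control, "control map"),
        (edited, "edited canonical image"),
    ]
    return None, gallery_items

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    canonical_result = gr.Gallery(label="Edited Canonical Image", columns=3)
    output_video = gr.Video(label="Edited video")
    run_button = gr.Button("Edit your video!")
    # The two return values map positionally onto the two output components,
    # mirroring outputs=[output_video, canonical_result] in the diff above.
    run_button.click(fn=fake_edit, inputs=[prompt], outputs=[output_video, canonical_result])

demo.launch()

With this wiring, clicking the button fills the gallery with the three captioned images, which is exactly what the new canonical_result component does for the saved canonical image, control map, and edited canonical image.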