Spaces: Running on Zero
Update app_lora.py
Browse files
- app_lora.py  +66 -24

app_lora.py  CHANGED
@@ -756,36 +756,78 @@ def generate_imagenegative(prompt, height, width, steps, seed, guidance_scale=7.

@spaces.GPU
def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
-    [removed: previous body of generate_image; the deleted lines are not preserved in this excerpt]
-    return result.images, seed, log_buffer.getvalue()


# this is a stable version that can gen the final image and a noise-to-latent preview
@@ -793,7 +835,7 @@ def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
def generate_image_verygood_realnoise(prompt, height, width, steps, seed, guidance_scale=0.0):
    LOGS = []
    device = "cuda"
-    generator = torch.Generator(

    placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
    latent_gallery = []

@spaces.GPU
def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
+    LOGS = []
+    device = "cuda"
+    generator = torch.Generator(device).manual_seed(int(seed))
+
+    placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
+    latent_gallery = []
+    final_gallery = []
+
+    # --- Generate latent previews in a loop ---
+    try:
+        latents = safe_get_latents(pipe, height, width, generator, device, LOGS)
+
+        # Convert latents to float32 if necessary
+        if latents.dtype != torch.float32:
+            latents = latents.float()
+
+        # Loop for multiple previews before final image
+        num_previews = min(10, steps)  # show ~10 previews
+        preview_steps = torch.linspace(0, 1, num_previews)
+
+        for i, alpha in enumerate(preview_steps):
+            try:
+                with torch.no_grad():
+                    # Simple noise interpolation for preview (simulate denoising progress)
+                    preview_latent = latents * alpha + torch.randn_like(latents) * (1 - alpha)
+                    # Decode to PIL
+                    latent_img_tensor = pipe.vae.decode(preview_latent).sample  # [1,3,H,W]
+                    latent_img_tensor = (latent_img_tensor / 2 + 0.5).clamp(0, 1)
+                    latent_img_tensor = latent_img_tensor.cpu().permute(0, 2, 3, 1)[0]
+                    latent_img = Image.fromarray((latent_img_tensor.numpy() * 255).astype('uint8'))
+            except Exception as e:
+                LOGS.append(f"⚠️ Latent preview decode failed: {e}")
+                latent_img = placeholder
+
+            latent_gallery.append(latent_img)
+            yield None, latent_gallery, LOGS  # update Gradio with intermediate preview
+
+        # Save final latents to HF
+        latent_dict = {"latents": latents.cpu(), "prompt": prompt, "seed": seed}
+        try:
+            hf_url = upload_latents_to_hf(latent_dict, filename=f"latents_{seed}.pt")
+            LOGS.append(f"🔹 Latents uploaded: {hf_url}")
+        except Exception as e:
+            LOGS.append(f"⚠️ Failed to upload latents: {e}")
+
+    except Exception as e:
+        LOGS.append(f"⚠️ Latent generation failed: {e}")
+        latent_gallery.append(placeholder)
+        yield None, latent_gallery, LOGS
+
+    # --- Final image: untouched standard pipeline ---
+    try:
+        output = pipe(
+            prompt=prompt,
+            height=height,
+            width=width,
+            num_inference_steps=steps,
+            guidance_scale=guidance_scale,
+            generator=generator,
+        )
+        final_img = output.images[0]
+        final_gallery.append(final_img)
+        latent_gallery.append(final_img)  # fallback preview if needed
+        LOGS.append("✅ Standard pipeline succeeded.")
+        yield final_img, latent_gallery, LOGS
+
+    except Exception as e2:
+        LOGS.append(f"❌ Standard pipeline failed: {e2}")
+        final_gallery.append(placeholder)
+        latent_gallery.append(placeholder)
+        yield placeholder, latent_gallery, LOGS
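The rewritten generate_image is a Python generator: every `yield` hands back the current (final image, preview gallery, logs) triple, and Gradio re-renders the bound output components on each yield, so the latent previews appear while the run is still in progress. A minimal sketch of that wiring, assuming illustrative component names and default values rather than the Space's actual UI code:

```python
import gradio as gr

# Hypothetical UI wiring (component names and defaults are assumptions, not
# taken from app_lora.py). Because generate_image is a generator function,
# Gradio streams each yielded (image, gallery, logs) update to the outputs.
with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    height = gr.Slider(256, 1536, value=1024, step=64, label="Height")
    width = gr.Slider(256, 1536, value=1024, step=64, label="Width")
    steps = gr.Slider(1, 50, value=28, step=1, label="Steps")
    seed = gr.Number(value=0, precision=0, label="Seed")
    run = gr.Button("Generate")

    final = gr.Image(label="Final image")
    previews = gr.Gallery(label="Latent previews")
    logs = gr.Textbox(label="Logs")

    run.click(
        generate_image,
        inputs=[prompt, height, width, steps, seed],
        outputs=[final, previews, logs],
    )

demo.launch()
```

Note that the function yields LOGS as a list of strings, so the actual Space may join them before displaying them in a textbox.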


# this is a stable version that can gen the final image and a noise-to-latent preview

def generate_image_verygood_realnoise(prompt, height, width, steps, seed, guidance_scale=0.0):
    LOGS = []
    device = "cuda"
+    generator = torch.Generator().manual_seed(int(seed))

    placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
    latent_gallery = []
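Two helpers referenced above, safe_get_latents and upload_latents_to_hf, are defined elsewhere in app_lora.py and are not part of this diff. As a rough illustration of what the upload step could look like with huggingface_hub (the repo id, repo type, and token handling below are assumptions, not the Space's actual code):

```python
import io
import torch
from huggingface_hub import HfApi

def upload_latents_to_hf(latent_dict, filename, repo_id="user/latents-archive"):
    """Hypothetical sketch of the helper called in the diff.

    Serializes the latent dict with torch.save and uploads it to a Hub repo.
    The real helper's repo_id, repo_type, and return value are not shown in
    this commit, so those details are assumptions.
    """
    buffer = io.BytesIO()
    torch.save(latent_dict, buffer)
    buffer.seek(0)

    api = HfApi()  # picks up HF_TOKEN from the environment if available
    commit = api.upload_file(
        path_or_fileobj=buffer,
        path_in_repo=filename,
        repo_id=repo_id,
        repo_type="dataset",
    )
    return commit  # the diff logs this value, so the real helper likely returns a URL
```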