Correct way to use the fp32 weights?
Sorry, what is the correct way and combination of templates to use for generating with FP32? I keep getting black images.
I'm using:
- diffusion_pytorch_model-ema-only-fp32.safetensors
- qwen_3_4b_bf16.safetensors
- ae.safetensors
I've removed the incorrect VAE file; please use ae_bf16.safetensors instead.
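If you are loading the files directly with diffusers rather than through a UI workflow, the bf16 VAE can in principle be loaded from the single file as sketched below. This is not taken from the scripts further down: AutoencoderKL.from_single_file exists in recent diffusers, but whether it resolves this particular checkpoint's configuration automatically is an assumption, and it may need an explicit config argument.

import torch
from diffusers import AutoencoderKL

# Sketch only: the path is a placeholder for wherever ae_bf16.safetensors was saved,
# and from_single_file may require an explicit `config=` for this VAE.
vae = AutoencoderKL.from_single_file(
    "ae_bf16.safetensors",
    torch_dtype=torch.bfloat16,
)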
diffusion_pytorch_model-ema-only-fp32.safetensors and ae_bf16.safetensors and qwen_3_4b_bf16.safetensors = black images :( :( :(
Try running these two scripts one after the other, specifying a local directory for the repository. The scripts download this repository, but use diffusion_pytorch_model-ema-only-fp32.safetensors as the transformer, which is what you want.
download_repo.py
import argparse
import os

from huggingface_hub import snapshot_download, hf_hub_download

REPO_ID = "tsqn/Z-Image-Turbo_fp32-fp16-bf16_full_and_ema-only"


def main(local_dir):
    TRANSFORMER_DIR = f"{local_dir}\\transformer"
    TEXT_ENCODER_DIR = f"{local_dir}\\text_encoder"
    VAE_DIR = f"{local_dir}\\vae"

    def _download_model_files():
        # Download the repository structure and configs, skipping all weight files,
        # then fetch only the specific weight variants we want.
        snapshot_download(repo_id=REPO_ID, ignore_patterns="*.safetensors", local_dir=local_dir)
        hf_hub_download(repo_id=REPO_ID, subfolder="transformer", filename="diffusion_pytorch_model-ema-only-fp32.safetensors", local_dir=local_dir)
        hf_hub_download(repo_id=REPO_ID, subfolder="text_encoder", filename="qwen_3_4b_bf16.safetensors", local_dir=local_dir)
        hf_hub_download(repo_id=REPO_ID, subfolder="vae", filename="ae_bf16.safetensors", local_dir=local_dir)

    def _rename_model_files():
        # Rename the downloaded variants to the default file names diffusers expects.
        os.rename(f"{TRANSFORMER_DIR}\\diffusion_pytorch_model-ema-only-fp32.safetensors", f"{TRANSFORMER_DIR}\\diffusion_pytorch_model.safetensors")
        os.rename(f"{TEXT_ENCODER_DIR}\\qwen_3_4b_bf16.safetensors", f"{TEXT_ENCODER_DIR}\\model.safetensors")
        os.rename(f"{VAE_DIR}\\ae_bf16.safetensors", f"{VAE_DIR}\\diffusion_pytorch_model.safetensors")

    try:
        _download_model_files()
    except Exception:
        print("ERROR when downloading model files!")
        raise

    try:
        _rename_model_files()
    except Exception:
        print("ERROR when renaming model files!")
        raise


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--local_dir", default=None, type=str, required=True, help="Local directory in which to save the repository with model files."
    )
    args = parser.parse_args()
    main(args.local_dir)
generate_image.py
import argparse

import torch
from diffusers import ZImagePipeline, ZImageTransformer2DModel, AutoencoderKL, FlowMatchEulerDiscreteScheduler
from transformers import Qwen3Model, Qwen2Tokenizer


def setup_zimage_pipeline(model_path, model_cpu_offload=True):
    def _setup_pipeline_components():
        # VAE and text encoder are loaded in bf16, the transformer in fp32.
        vae = AutoencoderKL.from_pretrained(model_path, subfolder="vae", torch_dtype=torch.bfloat16)
        text_encoder = Qwen3Model.from_pretrained(model_path, subfolder="text_encoder", torch_dtype=torch.bfloat16)
        tokenizer = Qwen2Tokenizer.from_pretrained(model_path, subfolder="tokenizer")
        transformer = ZImageTransformer2DModel.from_pretrained(model_path, subfolder="transformer", torch_dtype=torch.float32)
        return {
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "transformer": transformer,
        }

    pipeline = ZImagePipeline.from_pretrained(
        model_path,
        torch_dtype=torch.float32,
        low_cpu_mem_usage=False,
        **_setup_pipeline_components(),
    )
    pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipeline.scheduler.config)
    if model_cpu_offload:
        pipeline.enable_model_cpu_offload()
    return pipeline


def generate_image(pipe, prompt, height, width, num_inference_steps, guidance_scale, seed, output_save_path):
    with torch.inference_mode():
        image = pipe(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            generator=torch.Generator("cuda").manual_seed(seed),
        ).images[0]
    if output_save_path:
        image.save(f"{output_save_path}\\example_{seed}.png")
    else:
        image.save(f"example_{seed}.png")
    torch.cuda.empty_cache()


def main(args):
    pipeline = setup_zimage_pipeline(args.local_dir)
    generate_image(
        pipe=pipeline,
        prompt=args.prompt,
        width=args.width,
        height=args.height,
        num_inference_steps=args.num_inference_steps,
        guidance_scale=args.guidance_scale,
        seed=args.seed,
        output_save_path=args.output_save_path,
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--local_dir", default=None, type=str, required=True, help="Path to the local Z-Image diffusers repository."
    )
    parser.add_argument(
        "--prompt", default="Young Chinese woman in red Hanfu, intricate embroidery. Impeccable makeup, red floral forehead pattern. Elaborate high bun, golden phoenix headdress, red flowers, beads. Holds round folding fan with lady, trees, bird. Neon lightning-bolt lamp (⚡️), bright yellow glow, above extended left palm. Soft-lit outdoor night background, silhouetted tiered pagoda (西安大雁塔), blurred colorful distant lights.", type=str, required=False, help="Prompt used to generate the image."
    )
    parser.add_argument(
        "--width", default=1024, type=int, required=False, help="Width of the generated image."
    )
    parser.add_argument(
        "--height", default=1024, type=int, required=False, help="Height of the generated image."
    )
    parser.add_argument(
        "--num_inference_steps", default=9, type=int, required=False, help="Number of inference steps."
    )
    parser.add_argument(
        "--guidance_scale", default=0.0, type=float, required=False, help="Guidance scale setting."
    )
    parser.add_argument(
        "--seed", default=42, type=int, required=False, help="Random seed value."
    )
    parser.add_argument(
        "--output_save_path",
        default=None,
        type=str,
        required=False,
        help="Directory in which to save the generated image.",
    )
    args = parser.parse_args()
    main(args)
python download_repo.py --local_dir "C:\\zimage"
python generate_image.py --local_dir "C:\\zimage"
Generate an image with custom parameters:
python generate_image.py --local_dir "C:\\zimage" --prompt "Young Chinese woman in red Hanfu, intricate embroidery. Impeccable makeup, red floral forehead pattern. Elaborate high bun, golden phoenix headdress, red flowers, beads. Holds round folding fan with lady, trees, bird. Neon lightning-bolt lamp (⚡️), bright yellow glow, above extended left palm. Soft-lit outdoor night background, silhouetted tiered pagoda (西安大雁塔), blurred colorful distant lights." --width 1024 --height 1042 --num_inference_steps 9 --guidance_scale 0.0 --seed 42 --output_save_path "C:\\zimage_generations"
The local directory must be the same path in both scripts. The scripts use Windows-style path separators (\\), so this was tested on Windows.
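If the scripts still produce black images, it may be worth checking whether NaNs appear in the latents during sampling and whether the saved output really is all zeros. The sketch below reuses setup_zimage_pipeline() from generate_image.py; callback_on_step_end is a standard diffusers mechanism, but I have not verified that ZImagePipeline supports it, and the prompt and paths are just placeholders.

import numpy as np
import torch

from generate_image import setup_zimage_pipeline

def report_nans(pipe, step, timestep, callback_kwargs):
    # Debugging hook (assumes the pipeline accepts diffusers' callback_on_step_end).
    # NaN latents typically decode to a fully black frame.
    if torch.isnan(callback_kwargs["latents"]).any():
        print(f"NaN latents at step {step}")
    return callback_kwargs

pipeline = setup_zimage_pipeline("C:\\zimage")
image = pipeline(
    prompt="a red apple on a wooden table",
    num_inference_steps=9,
    guidance_scale=0.0,
    generator=torch.Generator("cuda").manual_seed(42),
    callback_on_step_end=report_nans,
    callback_on_step_end_tensor_inputs=["latents"],
).images[0]

arr = np.asarray(image)
print("pixel range:", arr.min(), "-", arr.max())  # "0 - 0" means a fully black image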