Hello, I have the following code:
!pip install diffusers transformers accelerate peft bitsandbytes datasets controlnet_aux
import torch
from diffusers import StableDiffusionXLPipeline, StableDiffusionUpscalePipeline, DPMSolverMultistepScheduler, ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
from peft import LoraConfig, get_peft_model, PeftModel
from accelerate import Accelerator
from datasets import load_dataset
import matplotlib.pyplot as plt
import gc
from controlnet_aux import OpenposeDetector
from diffusers.utils import load_image
from google.colab import drive
import os
from PIL import Image
import random
import requests
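# Mount Google Drive so the LoRA checkpoint path used below is reachable
# (assuming Drive is not already mounted in an earlier cell)
drive.mount('/content/drive')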
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stablediffusionapi/epicrealism-xl",
    torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lora_path = "/content/drive/MyDrive/Colab Notebooks/myLora/improved/checkpoint-3000/pytorch_lora_weights.safetensors"
pipe.load_lora_weights(lora_path)
# Swap in an SDXL VAE: the SD-1.x "sd-vae-ft-ema" weights don't match SDXL's latent space; the fp16-fix variant also avoids NaN/black images in float16
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe.vae = vae.to("cuda")
# Sanity check: generate an image to verify the pipeline works
# Clear GPU memory
torch.cuda.empty_cache()
gc.collect()
prompt = "ultra quality, hyper realistic, 8k photo, detailed face, film grain, masterpiece, best quality, a close-up photoof LucaDanieliPhD, man with blond hair, walking on a street, looking at the camera, upper body, cheerful and natural expression, natural lighting, comfy black outfit, comfortable environment"
negative_prompt = "deformed, ugly, bad eyes, closed eyes, deformed hands, blurry, cgi, airbrushed, plastic, deformed, watermark, beard, legs, grainy, low quality, worst quality"
images = pipe(
    prompt,
    negative_prompt=negative_prompt,
    height=1024,
    width=1024,
    guidance_scale=8,
    num_inference_steps=30,
    # high_res_fix=True,  # not a valid diffusers argument (the "hires fix" is an Automatic1111 webui feature); see the img2img sketch below
    num_images_per_prompt=1
).images
# Save and display the images
if images is not None:
    print(f"Image generation completed successfully! Generated {len(images)} images.")
    for i, image in enumerate(images):
        # image.save(f"/content/drive/MyDrive/Colab Notebooks/myLora/test_{i}.png")
        plt.figure()
        plt.imshow(image)
        plt.axis('off')
        plt.show()
else:
    print("Image generation might have failed.")
Running this gives me the following output. Does anybody know what I am doing wrong?
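For context, as far as I can tell diffusers has no high_res_fix argument (that is an Automatic1111 webui feature), so my plan is to approximate it with a second, low-strength img2img pass over the generated image. This is only a sketch: the 1536x1536 resize and strength=0.3 are guesses, and AutoPipelineForImage2Image.from_pipe just reuses the components already loaded above:

from diffusers import AutoPipelineForImage2Image

# Reuse the SDXL components already loaded above for an img2img pass
img2img = AutoPipelineForImage2Image.from_pipe(pipe)

# Upscale the first result, then denoise it lightly to add detail
upscaled = images[0].resize((1536, 1536), Image.LANCZOS)
refined = img2img(
    prompt,
    negative_prompt=negative_prompt,
    image=upscaled,
    strength=0.3,  # low strength keeps the composition and only refines detail
    guidance_scale=8,
    num_inference_steps=30,
).images[0]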