Hello,
I had a demo a few days ago where the API was working fine. Yesterday, during testing, I received a 422 (Unprocessable Entity) error, even though nothing had been changed since my demo:
def setup_pipelines():
    """
    Set up the text-to-image and image-to-image pipelines based on the
    system's capabilities (GPU vs CPU).

    Returns:
        tuple: (pipe, pipe_i2i) — a StableDiffusionPipeline and a
        StableDiffusionImg2ImgPipeline sharing the same DPM++ scheduler.
    """
    # NOTE: the pasted original used curly "smart quotes" (a copy/paste
    # artifact) which are a SyntaxError in Python; straight quotes restored.
    scheduler = DPMSolverMultistepScheduler.from_pretrained(MODEL_ID, subfolder="scheduler")
    if torch.cuda.is_available():
        # fp16 halves memory use and is fast on CUDA devices.
        pipe = StableDiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.float16, scheduler=scheduler).to("cuda")
        pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.float16, scheduler=scheduler).to("cuda")
    else:
        # CPU execution requires fp32 (half precision is not generally
        # supported for these ops on CPU).
        pipe = StableDiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.float32, scheduler=scheduler)
        pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.float32, scheduler=scheduler)
    return pipe, pipe_i2i


pipe, pipe_i2i = setup_pipelines()
def format_error_message(error, title="Error"):
    """
    Format *error* as a small markdown block headed by *title*.

    Args:
        error: The error (exception or string) to display; falsy means "no error".
        title: Heading shown above the error text (default "Error").

    Returns:
        str: ``"#### {title}\\n{error}"`` when *error* is truthy, else "".
    """
    return f"#### {title}\n{error}" if error else ""
def inference(prompt: str, neg_prompt: str, guidance: float, steps: int, width=512, height=512, seed=0, auto_prefix=False, img=None, strength=0):
    """
    Run inference and return a ``(image, error_markdown)`` pair; exactly one
    of the two is ``None``.

    Args:
        prompt: Text prompt (PREFIX is prepended when *auto_prefix* is set).
        neg_prompt: Negative prompt; falls back to DEFAULT_NEG_PROMPT if empty.
        guidance: Classifier-free guidance scale.
        steps: Number of denoising steps.
        width, height: Output image size in pixels.
        seed: RNG seed; 0 means "random" (no fixed generator).
        auto_prefix: Whether to prepend the styling PREFIX to the prompt.
        img, strength: Accepted for the image-to-image tab.
            NOTE(review): they are not forwarded anywhere yet — only
            text-to-image is performed here; confirm whether i2i was intended.

    Returns:
        tuple: (PIL image, None) on success, (None, markdown error string) on failure.
    """
    # Original hard-coded torch.Generator('cuda'), which raises on CPU-only
    # hosts; seed on whichever device the pipelines actually run on.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device).manual_seed(seed) if seed != 0 else None
    prompt = f"{PREFIX}, {prompt}" if auto_prefix else prompt
    neg_prompt = neg_prompt if neg_prompt else DEFAULT_NEG_PROMPT
    try:
        return generate_image(prompt, neg_prompt, guidance, steps, width, height, generator), None
    except Exception as e:
        print(format_error_message(e))
        return None, format_error_message(e)
def generate_image(prompt, neg_prompt, guidance, steps, width, height, generator):
    """
    Generate one image from a text prompt via the module-level ``pipe``.

    Args:
        prompt: Positive text prompt.
        neg_prompt: Negative prompt passed to the pipeline.
        guidance: Guidance scale.
        steps: Denoising step count (cast to int for the pipeline).
        width, height: Output dimensions in pixels.
        generator: Optional seeded ``torch.Generator`` for reproducibility.

    Returns:
        The first (and only) image from the pipeline result.
    """
    # Plain string — the original had a stray f-prefix with no placeholders.
    print("Generating Image…")
    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator,
    )
    print(f"Generated Image → {result.images}")
    return result.images[0]
# Gradio UI: left column = prompt + output, right column = options / img2img.
with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column(scale=55):
            with gr.Group():
                with gr.Row():
                    prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder=f"{PREFIX} [your prompt]").style(container=False)
                    generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
                image_out = gr.Image(height=512)
            error_output = gr.Markdown()
        with gr.Column(scale=45):
            with gr.Tab("Options"):
                with gr.Group():
                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
                    # NOTE(review): value/visible are given the PREFIX string,
                    # relying on truthiness coercion — confirm intent.
                    auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=PREFIX, visible=PREFIX)
                    with gr.Row():
                        guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                        steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
                    with gr.Row():
                        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
                        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
                    seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
            with gr.Tab("Image to image"):
                with gr.Group():
                    image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)

    # BUG FIX: the original referenced lowercase `prefix`, which is undefined
    # (NameError when the checkbox is toggled); the module constant is PREFIX.
    auto_prefix.change(
        lambda x: gr.update(placeholder=f"{PREFIX} [your prompt]" if x else "[Your prompt]"),
        inputs=auto_prefix,
        outputs=prompt,
        queue=False,
    )

    inputs_i2i = [prompt, neg_prompt, guidance, steps, width, height, seed, auto_prefix, image, strength]
    inputs = [prompt, neg_prompt, guidance, steps, width, height, seed, auto_prefix]
    outputs = [image_out, error_output]
    # The named endpoint exposed to the API is "txt2img"; callers must send
    # {"data": [...]} with exactly len(inputs) == 8 entries, in this order.
    generate.click(fn=inference, inputs=inputs, outputs=outputs, api_name="txt2img")

demo.launch()
The error I'm getting is the 422 mentioned above. I'm calling the API using:
/**
 * POST a request to the Gradio "txt2img" named endpoint.
 *
 * Gradio named endpoints expect a JSON body of the form {"data": [...]},
 * where the array holds one entry per declared input component, in order.
 * For api_name="txt2img" that is 8 items:
 *   [prompt, neg_prompt, guidance, steps, width, height, seed, auto_prefix]
 * A payload whose "data" is missing, not an array, or has the wrong number
 * of entries is rejected by the server with HTTP 422 — the likely cause of
 * the error above; verify `responseBody` is an 8-element array.
 *
 * @param {Array} responseBody - Ordered input values for the endpoint.
 * @returns {Promise<*>} The first output value, or "retry." on failure.
 */
export async function sendToApi(responseBody) {
    let data_json = { "data": responseBody };
    console.log("SENDING: ", data_json);
    // NOTE(review): Access-Control-Allow-Credentials / -Origin are *response*
    // headers set by the server; sending them from the client does nothing
    // and can trigger an unnecessary CORS preflight, so they are omitted.
    return await fetch('xxxxx/txt2img', {
        method: "POST",
        headers: {
            "Authorization": 'Bearer hf_TOKEN',
            "Content-Type": "application/json"
        },
        body: JSON.stringify(data_json)
    })
    .then(response => response.json())
    .then(data => {
        console.log(data);
        return data["data"][0];
    })
    .catch(error => {
        console.log(error);
        return "retry.";
    });
}
Does anyone know why this is happening or what i could do? Litterally made zero changes for this to even occur, i cant even test the api on huggingface