Keep getting a 400 ModelError when invoking my SageMaker endpoint

I keep running into this error whenever I call invoke_endpoint on my SageMaker endpoint:

ModelError                                Traceback (most recent call last)
Cell In[13], line 46
     43 serialized_payload = json.dumps(payload) # Serialize the payload to JSON format
     45 # Make API call
---> 46 response = client.invoke_endpoint(
     47     EndpointName=endpoint_name,
     48     Body=serialized_payload,
     49     ContentType='application/json' # Specify the format of the payload
     50 )
     52 response_payload = json.loads(response['Body'].read().decode("utf-8"))
     54 end_time = time.time()

File /opt/conda/lib/python3.10/site-packages/botocore/client.py:535, in ClientCreator._create_api_method.<locals>._api_call(self, *args, **kwargs)
    531 raise TypeError(
    532     f"{py_operation_name}() only accepts keyword arguments."
    533 )
    534 # The "self" in this scope is referring to the BaseClient.
--> 535 return self._make_api_call(operation_name, kwargs)

File /opt/conda/lib/python3.10/site-packages/botocore/client.py:980, in BaseClient._make_api_call(self, operation_name, api_params)
    978 error_code = parsed_response.get("Error", {}).get("Code")
    979 error_class = self.exceptions.from_code(error_code)
--> 980 raise error_class(parsed_response, operation_name)
    981 else:
    982     return parsed_response

ModelError: An error occurred (ModelError) when calling the InvokeEndpoint operation: Received client error (400) from primary with message "{
  "code": 400,
  "type": "InternalServerException",
  "message": "\u0027NoneType\u0027 object has no attribute \u0027startswith\u0027"
}
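For context, this is a trimmed-down version of the notebook cell that triggers the error. The endpoint name, prompt, and parameter values here are placeholders, but the payload keys match what my predict_fn pops off the request body:

import json
import boto3

client = boto3.client("sagemaker-runtime")
endpoint_name = "my-stable-diffusion-endpoint"  # placeholder

# Keys mirror what predict_fn expects in the request body
payload = {
    "inputs": "a photo of an astronaut riding a horse",
    "num_inference_steps": 50,
    "guidance_scale": 7.5,
    "num_images_per_prompt": 4,
}
serialized_payload = json.dumps(payload)  # Serialize the payload to JSON format

# Make API call
response = client.invoke_endpoint(
    EndpointName=endpoint_name,
    Body=serialized_payload,
    ContentType="application/json",  # Specify the format of the payload
)
response_payload = json.loads(response["Body"].read().decode("utf-8"))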

This is my model’s inference script:

%%writefile code/inference.py
import base64
import torch
from io import BytesIO
from diffusers import StableDiffusionPipeline

def model_fn(model_dir):
    # Load Stable Diffusion and move it to the GPU
    pipe = StableDiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16)
    pipe = pipe.to("cuda")

    return pipe

def predict_fn(data, pipe):
    # get prompt & parameters
    prompt = data.pop("inputs", data)
    # set valid HP for stable diffusion
    num_inference_steps = data.pop("num_inference_steps", 50)
    guidance_scale = data.pop("guidance_scale", 7.5)
    num_images_per_prompt = data.pop("num_images_per_prompt", 4)

    # run generation with parameters
    generated_images = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=num_images_per_prompt,
    )["images"]

    # base64-encode each generated image for the JSON response
    encoded_images = []
    for image in generated_images:
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        encoded_images.append(base64.b64encode(buffered.getvalue()).decode())

    # create response
    return {"generated_images": encoded_images}
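For completeness, this is roughly how I decode the response on the client side when a call does succeed (the filenames are arbitrary); it just reverses the base64 encoding done in predict_fn:

import base64

for i, img in enumerate(response_payload["generated_images"]):
    with open(f"generated_{i}.jpg", "wb") as f:
        f.write(base64.b64decode(img))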

Does anyone know how to fix this?