This is what I have:
Cannot initialize model with low cpu memory usage because accelerate
was not found in the environment. Defaulting to low_cpu_mem_usage=False
. It is strongly recommended to install accelerate
for faster and less memory-intense model loading. You can do so with:
pip install accelerate
.
Traceback (most recent call last):
File “/home/user/.local/lib/python3.8/site-packages/gradio/routes.py”, line 321, in run_predict
output = await app.blocks.process_api(
File “/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py”, line 1015, in process_api
result = await self.call_function(fn_index, inputs, iterator, request)
File “/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py”, line 856, in call_function
prediction = await anyio.to_thread.run_sync(
File “/home/user/.local/lib/python3.8/site-packages/anyio/to_thread.py”, line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File “/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py”, line 937, in run_sync_in_worker_thread
return await future
File “/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py”, line 867, in run
result = context.run(func, *args)
File “app.py”, line 18, in TextToImage
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.get_default_dtype)
File “/home/user/.local/lib/python3.8/site-packages/diffusers/pipeline_utils.py”, line 708, in from_pretrained
loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
File “/home/user/.local/lib/python3.8/site-packages/transformers/modeling_utils.py”, line 2325, in from_pretrained
dtype_orig = cls._set_default_torch_dtype(torch_dtype)
File “/home/user/.local/lib/python3.8/site-packages/transformers/modeling_utils.py”, line 1102, in _set_default_torch_dtype
if not dtype.is_floating_point:
AttributeError: ‘builtin_function_or_method’ object has no attribute ‘is_floating_point’
Traceback (most recent call last):
File “/home/user/.local/lib/python3.8/site-packages/gradio/routes.py”, line 321, in run_predict
output = await app.blocks.process_api(
File “/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py”, line 1013, in process_api
inputs = self.preprocess_data(fn_index, inputs, state)
File “/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py”, line 923, in preprocess_data
processed_input.append(block.preprocess(inputs[i]))
IndexError: list index out of range
Traceback (most recent call last):
File “/home/user/.local/lib/python3.8/site-packages/diffusers/configuration_utils.py”, line 326, in load_config
config_file = hf_hub_download(
File “/home/user/.local/lib/python3.8/site-packages/huggingface_hub/utils/_validators.py”, line 114, in _inner_fn
validate_repo_id(arg_value)
File “/home/user/.local/lib/python3.8/site-packages/huggingface_hub/utils/_validators.py”, line 172, in validate_repo_id
raise HFValidationError(
huggingface_hub.utils._validators.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: ''.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File “/home/user/.local/lib/python3.8/site-packages/gradio/routes.py”, line 321, in run_predict
output = await app.blocks.process_api(
File “/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py”, line 1015, in process_api
result = await self.call_function(fn_index, inputs, iterator, request)
File “/home/user/.local/lib/python3.8/site-packages/gradio/blocks.py”, line 856, in call_function
prediction = await anyio.to_thread.run_sync(
File “/home/user/.local/lib/python3.8/site-packages/anyio/to_thread.py”, line 31, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File “/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py”, line 937, in run_sync_in_worker_thread
return await future
File “/home/user/.local/lib/python3.8/site-packages/anyio/_backends/_asyncio.py”, line 867, in run
result = context.run(func, *args)
File “app.py”, line 18, in TextToImage
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.get_default_dtype)
File “/home/user/.local/lib/python3.8/site-packages/diffusers/pipeline_utils.py”, line 459, in from_pretrained
config_dict = cls.load_config(
File “/home/user/.local/lib/python3.8/site-packages/diffusers/configuration_utils.py”, line 363, in load_config
raise EnvironmentError(
OSError: We couldn’t connect to ‘https://huggingface.co’ to load this model, couldn’t find it in the cached files and it looks like is not the path to a directory containing a model_index.json file.
Checkout your internet connection or see how to run the library in offline mode at ‘Installation’.
To get this second error, I left the model field blank.