Upload FoxBaze_Try_On_Qwen_Edit_Lora_Alpha_0.txt with huggingface_hub
FoxBaze_Try_On_Qwen_Edit_Lora_Alpha_0.txt
CHANGED
@@ -15,7 +15,7 @@ image = pipe(image=input_image, prompt=prompt).images[0]
 
 ERROR:
 Traceback (most recent call last):
-  File "/tmp/
+  File "/tmp/FoxBaze_Try_On_Qwen_Edit_Lora_Alpha_0YNWzoe.py", line 28, in <module>
     pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image-Edit", dtype=torch.bfloat16, device_map="cuda")
   File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
     return fn(*args, **kwargs)
@@ -27,21 +27,30 @@ Traceback (most recent call last):
     )
   File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/diffusers/pipelines/pipeline_loading_utils.py", line 860, in load_sub_model
     loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
-  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/
-    return
-  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/
+  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+    return func(*args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/transformers/modeling_utils.py", line 5048, in from_pretrained
     ) = cls._load_pretrained_model(
         ~~~~~~~~~~~~~~~~~~~~~~~~~~^
         model,
         ^^^^^^
-    ...<
-
-
+    ...<12 lines>...
+        weights_only=weights_only,
+        ^^^^^^^^^^^^^^^^^^^^^^^^^^
     )
     ^
-  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/
-
-
-  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/
-
-
+  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/transformers/modeling_utils.py", line 5468, in _load_pretrained_model
+    _error_msgs, disk_offload_index = load_shard_file(args)
+                                      ~~~~~~~~~~~~~~~^^^^^^
+  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/transformers/modeling_utils.py", line 843, in load_shard_file
+    disk_offload_index = _load_state_dict_into_meta_model(
+        model,
+        ...<8 lines>...
+        device_mesh=device_mesh,
+    )
+  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 120, in decorate_context
+    return func(*args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/e2cbecedea9f84ce/lib/python3.13/site-packages/transformers/modeling_utils.py", line 770, in _load_state_dict_into_meta_model
+    _load_parameter_into_model(model, param_name, param.to(param_device))
+                                                  ~~~~~~~~^^^^^^^^^^^^^^
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 260.00 MiB. GPU 0 has a total capacity of 22.03 GiB of which 33.12 MiB is free. Including non-PyTorch memory, this process has 21.99 GiB memory in use. Of the allocated memory 21.79 GiB is allocated by PyTorch, and 23.18 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
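
The updated log records a torch.OutOfMemoryError: with device_map="cuda", every sub-model of Qwen/Qwen-Image-Edit is materialized on a single 22 GiB GPU, which is not enough for the bf16 weights. A minimal sketch of a workaround, assuming a recent diffusers build where this pipeline supports the standard CPU-offload hooks; the allocator setting is the one suggested by the error message itself:

import os

# Suggested by the OOM message to reduce allocator fragmentation.
# Must be set before CUDA is first initialized in the process.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import torch
from diffusers import DiffusionPipeline

# Load without device_map so the weights land on CPU first...
pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image-Edit", dtype=torch.bfloat16)

# ...then move each sub-model to the GPU only while it is running,
# keeping peak VRAM well below the full-pipeline footprint.
pipe.enable_model_cpu_offload()

If model-level offload still overflows, pipe.enable_sequential_cpu_offload() trades more speed for a much smaller VRAM peak.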