{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "faf9556d", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/home/ubuntu/Qwen-Image-Edit-Angles\n" ] } ], "source": [ "%cd /home/ubuntu/Qwen-Image-Edit-Angles" ] }, { "cell_type": "code", "execution_count": 2, "id": "d74b1b7e", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/usr/lib/python3/dist-packages/sklearn/utils/fixes.py:25: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81.\n", " from pkg_resources import parse_version # type: ignore\n", "2025-11-23 10:48:20.190181: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", "2025-11-23 10:48:20.204255: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", "E0000 00:00:1763894900.221429 2465541 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "E0000 00:00:1763894900.227066 2465541 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "W0000 00:00:1763894900.240375 2465541 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", "W0000 00:00:1763894900.240390 2465541 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", "W0000 00:00:1763894900.240392 2465541 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", "W0000 00:00:1763894900.240394 2465541 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\n", "2025-11-23 10:48:20.244577: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", "To enable the following instructions: AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" ] }, { "ename": "AttributeError", "evalue": "'MessageFactory' object has no attribute 'GetPrototype'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", "\u001b[0;31mAttributeError\u001b[0m: 'MessageFactory' object has no attribute 'GetPrototype'" ] }, { "ename": "AttributeError", "evalue": "'MessageFactory' object has no attribute 'GetPrototype'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", "\u001b[0;31mAttributeError\u001b[0m: 'MessageFactory' object has no attribute 'GetPrototype'" ] }, { "ename": "AttributeError", "evalue": "'MessageFactory' object has no attribute 'GetPrototype'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", "\u001b[0;31mAttributeError\u001b[0m: 'MessageFactory' object has no attribute 'GetPrototype'" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/home/ubuntu/.local/lib/python3.10/site-packages/google/api_core/_python_version_support.py:266: FutureWarning: You are using a Python version (3.10.12) which Google will stop supporting in new releases of google.api_core once it reaches its end of life (2026-10-04). 
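, { "cell_type": "markdown", "id": "imports-sketch-md", "metadata": {}, "source": [ "The cell above captures the import-time noise from loading the training stack, but its source is empty. A minimal sketch of the imports the rest of this notebook relies on; `from wandml import WandDataPipe` and the location of `StyleImagetoImageSource` are assumptions (only the module paths `wandml/trainers/experiment_trainer.py`, `wandml/core/source.py`, and `qwenimage.datasets` are attested by outputs elsewhere in this notebook)." ] },
{ "cell_type": "code", "execution_count": null, "id": "imports-sketch", "metadata": {}, "outputs": [], "source": [ "# Hedged sketch, not the original cell: import paths inferred from the\n", "# tracebacks and later cells of this notebook.\n", "from wandml import WandDataPipe  # assumed export; wandml/core/source.py appears in a warning below\n", "from wandml.trainers.experiment_trainer import ExperimentTrainer  # path from the final traceback\n", "from qwenimage.datasets import StyleImagetoImageSource  # assumed: same module as StyleSource below" ] }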
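, { "cell_type": "markdown", "id": "bnb-note-md", "metadata": {}, "source": [ "The bitsandbytes failure above is an ABI mismatch: the system `libstdc++.so.6` predates `GLIBCXX_3.4.32`, which the bundled `libbitsandbytes_cpu.so` requires, so 8-bit/4-bit kernels are unavailable until it is resolved. A hedged troubleshooting sketch: the first command is the diagnostic the warning itself recommends; the `strings` check and the conda remedy are generic suggestions (the latter assumes a conda environment is available)." ] },
{ "cell_type": "code", "execution_count": null, "id": "bnb-diagnostic", "metadata": {}, "outputs": [], "source": [ "# Diagnostic recommended by the bitsandbytes warning above.\n", "!python -m bitsandbytes\n", "# List which GLIBCXX versions the system libstdc++ actually provides.\n", "!strings /lib/x86_64-linux-gnu/libstdc++.so.6 | grep GLIBCXX_3.4.3\n", "# Possible remedy (assumption: conda environment): install a newer libstdc++ runtime.\n", "# !conda install -y -c conda-forge libstdcxx-ng" ] }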
, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " of len2\n" ] } ], "source": [ "src = StyleImagetoImageSource(\n", "    csv_path=\"/data/chatgpt-style-transfer-data/output/results.csv\",\n", "    base_dir=\"/data/chatgpt-style-transfer-data\",\n", "    style_title=\"Simpsons\",\n", "    data_range=[2, 35],\n", ")" ] },
{ "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " of len33\n" ] } ], "source": [ "src = StyleImagetoImageSource(\n", "    csv_path=\"/data/chatgpt-style-transfer-data/output/results.csv\",\n", "    base_dir=\"/data/chatgpt-style-transfer-data\",\n", "    style_title=\"Simpsons\",\n", "    data_range=[0, 2],\n", ")" ] },
{ "cell_type": "code", "execution_count": 3, "id": "ba2e8778", "metadata": {}, "outputs": [], "source": [ "from qwenimage.datasets import StyleSource\n", "\n", "src = StyleSource(\"/data/styles-finetune-data-artistic/tarot\", \"<0001>\")" ] },
{ "cell_type": "code", "execution_count": 4, "id": "eda50bdf", "metadata": {}, "outputs": [], "source": [ "from wandml.data.tasks.text_to_image import TextToImageTask\n", "\n", "task = TextToImageTask()" ] },
{ "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/ubuntu/wand-ml/wandml/core/source.py:14: UserWarning: Deprecated: Use data_types instead of _data_types\n", "  warnings.warn(\"Deprecated: Use data_types instead of _data_types\")\n" ] } ], "source": [ "dp = WandDataPipe()\n", "dp.add_source(src)\n", "dp.set_task(task)" ] }
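, { "cell_type": "markdown", "id": "foundation-sketch-md", "metadata": {}, "source": [ "`ExperimentTrainer` below takes a `foundation` model and a `config`, neither of whose construction appears in this notebook. A minimal sketch under stated assumptions: `QwenImageFoundation` and `ExperimentConfig` are hypothetical names; only the module path `qwenimage/foundation.py` (from the final traceback) and the field values echoed in the config dump below (`output_dir='output'`, `train_batch_size=1`, `learning_rate=0.001`, `mixed_precision='bf16'`, `lora_rank=16`) are attested." ] },
{ "cell_type": "code", "execution_count": null, "id": "foundation-sketch", "metadata": {}, "outputs": [], "source": [ "# Hypothetical construction of `foundation` and `config`; class names and\n", "# import locations are assumptions, argument values mirror the config dump\n", "# printed in the next cell's output.\n", "from qwenimage.foundation import QwenImageFoundation  # assumed class in qwenimage/foundation.py\n", "from wandml.trainers.experiment_trainer import ExperimentConfig  # assumed config class\n", "\n", "foundation = QwenImageFoundation()  # would trigger the 'Fetching 7 files' Hub download below\n", "config = ExperimentConfig(\n", "    output_dir=\"output\",\n", "    train_batch_size=1,\n", "    learning_rate=1e-3,\n", "    mixed_precision=\"bf16\",\n", "    lora_rank=16,\n", ")" ] }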
, { "cell_type": "code", "execution_count": 6, "id": "b98b9368", "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "c4aecbbe70e8441c8f7f7a15ff5a95f6", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Fetching 7 files: 0%| | 0/7 [00:00<?, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" },
{ "name": "stdout", "output_type": "stream", "text": [ "foundation= instance_data_dir=None class_data_dir=None instance_prompt=None class_prompt=None num_class_images=10 output_dir='output' seed=None size=1024 center_crop=False train_batch_size=1 num_train_epochs=1 max_train_steps=None save_steps=1000 save_path=None gradient_accumulation_steps=1 learning_rate=0.001 learning_rate_1d=1e-06 scale_lr=False lr_scheduler='constant' lr_warmup_steps=0 base_lr=1e-05 max_lr=0.001 step_size_up=2000 cyclic_lr_mode= cyclic_lr_cycle_momentum=False optim= adam_beta1=0.9 adam_beta2=0.999 adam_weight_decay=0.01 adam_epsilon=1e-08 max_grad_norm=1.0 mixed_precision='bf16' concepts_list=None modifier_tokens=None initializer_tokens=None checkpointing_steps=9999 resume_from_checkpoint=None train_text_encoder=True gcs_bucket=None topic_id='finetune-complete' concepts=None global_step=0 prior_loss_weight=1.0 wand_user_id='test' wand_model_id='testing' wand_model_bucket='wand-finetune' wand_project_name='wand-finetune' num_sample_images=5 prodigy_beta3=None prodigy_decouple=True prodigy_use_bias_correction=False prodigy_safeguard_warmup=False base_cache_dir=PosixPath('/data/wand_cache') num_validation_images=30 log_batch_steps=100 run_name=None record_training=True validation_steps=500 train_sigma_distribution='linear' inference_sigma_distribution='shift' quantize=False gradient_checkpointing=False compile=False lora_map_save_params=False log_model_steps=None resume_optimizer=False sample_steps=500 upload_optimizer=False early_stop=False preprocessing_epoch_len=128 train_regional=False preprocessing_epoch_repetitions=1 lora_rank=16 ema=False composite_reference=False train_color_fix=False num_workers=None wandb_entity='wand-tech' warmup_start_lr=0.0 lr_T_mult=1 lr_T_0=None logger_service='wandb' clearml_task_type='training' load_multi_view_lora=False train_max_sequence_length=512 train_dist='linear' train_shift=True inference_dist='linear' inference_shift=True static_mu=None loss_weight_dist=None\n" ] } ], "source": [ "trainer = ExperimentTrainer(foundation, dp, config)" ] },
{ "cell_type": "code", "execution_count": 9, "id": "d92855c1", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33meleazhong\u001b[0m to \u001b[32mhttps://api.wandb.ai\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" ] },
{ "name": "stdout", "output_type": "stream", "text": [ "wandb.init called with:\n", " project: wand-finetune\n", " entity: wand-tech\n", " name: None\n", " config: {'foundation': , 'instance_data_dir': None, 'class_data_dir': None, 'instance_prompt': None, 'class_prompt': None, 'num_class_images': 10, 'output_dir': 'output', 'seed': None, 'size': 1024, 'center_crop': False, 'train_batch_size': 1, 'num_train_epochs': 1, 'max_train_steps': None, 'save_steps': 1000, 'save_path': None, 'gradient_accumulation_steps': 1, 'learning_rate': 0.001, 'learning_rate_1d': 1e-06, 'scale_lr': False, 'lr_scheduler': 'constant', 'lr_warmup_steps': 0, 'base_lr': 1e-05, 'max_lr': 0.001, 'step_size_up': 2000, 'cyclic_lr_mode': , 'cyclic_lr_cycle_momentum': False, 'optim': , 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_weight_decay': 0.01, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'mixed_precision': 'bf16', 'concepts_list': None, 'modifier_tokens': None, 'initializer_tokens': None, 'checkpointing_steps': 9999, 'resume_from_checkpoint': None, 'train_text_encoder': True, 'gcs_bucket': None, 'topic_id': 'finetune-complete', 'concepts': None, 'global_step': 0, 'prior_loss_weight': 1.0, 'wand_user_id': 'test', 'wand_model_id': 'testing', 'wand_model_bucket': 'wand-finetune', 'wand_project_name': 'wand-finetune', 'num_sample_images': 5, 'prodigy_beta3': None, 'prodigy_decouple': True, 'prodigy_use_bias_correction': False, 'prodigy_safeguard_warmup': False, 'base_cache_dir': PosixPath('/data/wand_cache'), 'num_validation_images': 30, 'log_batch_steps': 100, 'run_name': None, 'record_training': True, 'validation_steps': 500, 'train_sigma_distribution': 'linear', 'inference_sigma_distribution': 'shift', 'quantize': False, 'gradient_checkpointing': False, 'compile': False, 'lora_map_save_params': False, 'log_model_steps': None, 'resume_optimizer': False, 'sample_steps': 500, 'upload_optimizer': False, 'early_stop': False, 'preprocessing_epoch_len': 128, 'train_regional': False, 'preprocessing_epoch_repetitions': 1, 'lora_rank': 16, 'ema': False, 'composite_reference': False, 'train_color_fix': False, 'num_workers': None, 'wandb_entity': 'wand-tech', 'warmup_start_lr': 0.0, 'lr_T_mult': 1, 'lr_T_0': None, 'logger_service': 'wandb', 'clearml_task_type': 'training', 'load_multi_view_lora': False, 'train_max_sequence_length': 512, 'train_dist': 'linear', 'train_shift': True, 'inference_dist': 'linear', 'inference_shift': True, 'static_mu': None, 'loss_weight_dist': None}\n", " tags: None\n", " kwargs: {'save_code': True}\n" ] },
{ "data": { "text/html": [ "Tracking run with wandb version 0.23.0" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" },
{ "data": { "text/html": [ "Run data is saved locally in /home/ubuntu/Qwen-Image-Edit-Angles/wandb/run-20251122_181330-lg6f3z2h" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" },
{ "data": { "text/html": [ "Syncing run graceful-galaxy-731 to Weights & Biases (docs)" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" },
{ "data": { "text/html": [ " View project at https://wandb.ai/wand-tech/wand-finetune" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" },
{ "data": { "text/html": [ " View run at https://wandb.ai/wand-tech/wand-finetune/runs/lg6f3z2h" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" },
{ "name": "stdout", "output_type": "stream", "text": [ "Using suggested max workers 26\n" ] },
{ "name": "stderr", "output_type": "stream", "text": [ "Train: 0%| | 0/6 [00:00<?, ?it/s]" ] },
{ "ename": "RuntimeError", "evalue": "Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", "\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtrainer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m~/wand-ml/wandml/utils/debug.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mDEBUG\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 22\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 23\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0mstart_time\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mperf_counter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/wand-ml/wandml/trainers/experiment_trainer.py\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 295\u001b[0m \u001b[0mbatch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"epoch\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mepoch\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 296\u001b[0m \u001b[0mbatch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"split\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"train\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 297\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msingle_step\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 298\u001b[0m \u001b[0mbatch_num\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 299\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mglobal_step\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/wand-ml/wandml/trainers/experiment_trainer.py\u001b[0m in \u001b[0;36msingle_step\u001b[0;34m(self, batch)\u001b[0m\n\u001b[1;32m 334\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0maccelerator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maccumulate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 335\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0maccelerator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautocast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 336\u001b[0;31m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msingle_step\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 337\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mctimed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"accelerator.backward(loss)\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 338\u001b[0m \u001b[0maccelerator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/wand-ml/wandml/core/hooks.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmanager\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_pre_hooks\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 14\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 15\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmanager\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun_post_hooks\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/Qwen-Image-Edit-Angles/qwenimage/foundation.py\u001b[0m in \u001b[0;36msingle_step\u001b[0;34m(self, batch)\u001b[0m\n\u001b[1;32m 190\u001b[0m \u001b[0mbatch_size\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mx_0\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 191\u001b[0m \u001b[0mt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtimestep_dist_utils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_train_t\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mseq_len\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mseq_len\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 192\u001b[0;31m \u001b[0mx_t\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1.0\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mx_0\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mt\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mx_1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 193\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 194\u001b[0m \u001b[0ml_channels\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtransformer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0min_channels\u001b[0m \u001b[0;34m//\u001b[0m \u001b[0;36m4\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mRuntimeError\u001b[0m: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!" ] } ], "source": [ "trainer.train()" ] }
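, { "cell_type": "markdown", "id": "device-fix-md", "metadata": {}, "source": [ "`trainer.train()` above fails inside `qwenimage/foundation.py::single_step`: per the traceback, `get_train_t` returns the timestep tensor `t` on the CPU while the latents `x_0`/`x_1` sit on `cuda:0`, so the flow-matching interpolation `x_t = (1 - t) * x_0 + t * x_1` mixes devices. Below is a minimal, self-contained reproduction and fix; the tensor shapes are illustrative, and moving `t` onto the latents' device (and dtype) before the interpolation is the assumed remedy for the method itself." ] },
{ "cell_type": "code", "execution_count": null, "id": "device-fix-sketch", "metadata": {}, "outputs": [], "source": [ "import torch\n", "\n", "# Reproduce the failure mode: a CPU timestep tensor mixed with CUDA latents\n", "# raises the same RuntimeError seen above. Shapes here are illustrative.\n", "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", "x_0 = torch.randn(1, 16, 64, 64, device=device)  # clean latents\n", "x_1 = torch.randn_like(x_0)                      # noise latents\n", "t = torch.rand(x_0.shape[0])                     # e.g. what get_train_t returns (CPU)\n", "\n", "# The fix: align device/dtype with the latents before interpolating, and\n", "# reshape t so it broadcasts over the latent dimensions.\n", "t = t.to(device=x_0.device, dtype=x_0.dtype).view(-1, 1, 1, 1)\n", "x_t = (1.0 - t) * x_0 + t * x_1                  # now runs on a single device" ] }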
], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 5 }