{ "cells": [ { "cell_type": "code", "execution_count": 8, "id": "81e4a1db", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Cloning into 'stable-diffusion'...\n", "remote: Enumerating objects: 64, done.\u001b[K\n", "remote: Counting objects: 100% (64/64), done.\u001b[K\n", "remote: Compressing objects: 100% (50/50), done.\u001b[K\n", "remote: Total 64 (delta 21), reused 56 (delta 14), pack-reused 0 (from 0)\u001b[K\n", "Receiving objects: 100% (64/64), 4.94 MiB | 4.07 MiB/s, done.\n", "Resolving deltas: 100% (21/21), done.\n" ] } ], "source": [ "!git clone -b CatVTON https://github.com/Harsh-Kesharwani/stable-diffusion.git" ] }, { "cell_type": "code", "execution_count": 9, "id": "9c89e320", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/kaggle/working/stable-diffusion/stable-diffusion\n" ] } ], "source": [ "cd stable-diffusion/" ] }, { "cell_type": "code", "execution_count": 10, "id": "8b304af3", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Already up to date.\n" ] } ], "source": [ "!git pull" ] }, { "cell_type": "code", "execution_count": 11, "id": "ff8b706c", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "--2025-06-11 17:24:50-- https://huggingface.co/sd-legacy/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt\n", "Resolving huggingface.co (huggingface.co)... 3.169.137.119, 3.169.137.111, 3.169.137.5, ...\n", "Connecting to huggingface.co (huggingface.co)|3.169.137.119|:443... connected.\n", "HTTP request sent, awaiting response... 307 Temporary Redirect\n", "Location: /stable-diffusion-v1-5/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt [following]\n", "--2025-06-11 17:24:51-- https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt\n", "Reusing existing connection to huggingface.co:443.\n", "HTTP request sent, awaiting response... 
302 Found\n", "Location: https://cdn-lfs.hf.co/repos/f6/56/f656f0fa3b8a40ac76d297fa2a4b00f981e8eb1261963460764e7dd3b35ec97f/c6bbc15e3224e6973459ba78de4998b80b50112b0ae5b5c67113d56b4e366b19?response-content-disposition=inline%3B+filename*%3DUTF-8%27%27sd-v1-5-inpainting.ckpt%3B+filename%3D%22sd-v1-5-inpainting.ckpt%22%3B&Expires=1749665471&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTc0OTY2NTQ3MX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5oZi5jby9yZXBvcy9mNi81Ni9mNjU2ZjBmYTNiOGE0MGFjNzZkMjk3ZmEyYTRiMDBmOTgxZThlYjEyNjE5NjM0NjA3NjRlN2RkM2IzNWVjOTdmL2M2YmJjMTVlMzIyNGU2OTczNDU5YmE3OGRlNDk5OGI4MGI1MDExMmIwYWU1YjVjNjcxMTNkNTZiNGUzNjZiMTk%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=ZXutmEiKZlrjAVEWY0wBGEV3qP-fmlOKp-KUU986qmVWGbVc0yqD%7E3dI0UR00PWsggfcUgFElx8005cDkqH2n6NK-jMqh5KIjt8MEj-GhEf--WSY5OheifsHKwW04CSNMpit0sI4Noyr0vyxvGku-zOSMll6TmtGXwxsz5Y6VAzdBYMdx2Fv3CPkYgjw5ia2cBK53bkmHIEsjpDNIeEbF3Fk3ZizooRJumE-YBUAHYRs94H5AiOYMoSpTPsogKu-pfwFyuLL-ciVnUviqxju8gPtjIqAT8qhe7dKXbb2o3ppy%7E2gNsHSB2A%7Ezpuqa-dhHfVW7OZkamC6DRJKt8hHOQ__&Key-Pair-Id=K3RPWS32NSSJCE [following]\n", "--2025-06-11 17:24:51-- https://cdn-lfs.hf.co/repos/f6/56/f656f0fa3b8a40ac76d297fa2a4b00f981e8eb1261963460764e7dd3b35ec97f/c6bbc15e3224e6973459ba78de4998b80b50112b0ae5b5c67113d56b4e366b19?response-content-disposition=inline%3B+filename*%3DUTF-8%27%27sd-v1-5-inpainting.ckpt%3B+filename%3D%22sd-v1-5-inpainting.ckpt%22%3B&Expires=1749665471&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTc0OTY2NTQ3MX19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5oZi5jby9yZXBvcy9mNi81Ni9mNjU2ZjBmYTNiOGE0MGFjNzZkMjk3ZmEyYTRiMDBmOTgxZThlYjEyNjE5NjM0NjA3NjRlN2RkM2IzNWVjOTdmL2M2YmJjMTVlMzIyNGU2OTczNDU5YmE3OGRlNDk5OGI4MGI1MDExMmIwYWU1YjVjNjcxMTNkNTZiNGUzNjZiMTk%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qIn1dfQ__&Signature=ZXutmEiKZlrjAVEWY0wBGEV3qP-fmlOKp-KUU986qmVWGbVc0yqD%7E3dI0UR00PWsggfcUgFElx8005cDkqH2n6NK-jMqh5KIjt8MEj-GhEf--WSY5OheifsHKwW04CSNMpit0sI4Noyr0vyxvGku-zOSMll6TmtGXwxsz5Y6VAzdBYMdx2Fv3CPkYgjw5ia2cBK53bkmHIEsjpDNIeEbF3Fk3ZizooRJumE-YBUAHYRs94H5AiOYMoSpTPsogKu-pfwFyuLL-ciVnUviqxju8gPtjIqAT8qhe7dKXbb2o3ppy%7E2gNsHSB2A%7Ezpuqa-dhHfVW7OZkamC6DRJKt8hHOQ__&Key-Pair-Id=K3RPWS32NSSJCE\n", "Resolving cdn-lfs.hf.co (cdn-lfs.hf.co)... 3.169.121.44, 3.169.121.27, 3.169.121.78, ...\n", "Connecting to cdn-lfs.hf.co (cdn-lfs.hf.co)|3.169.121.44|:443... connected.\n", "HTTP request sent, awaiting response... 200 OK\n", "Length: 4265437280 (4.0G) [binary/octet-stream]\n", "Saving to: ‘sd-v1-5-inpainting.ckpt’\n", "\n", "sd-v1-5-inpainting. 
100%[===================>] 3.97G 401MB/s in 9.4s \n", "\n", "2025-06-11 17:25:00 (435 MB/s) - ‘sd-v1-5-inpainting.ckpt’ saved [4265437280/4265437280]\n", "\n" ] } ], "source": [ "!wget https://huggingface.co/sd-legacy/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt" ] }, { "cell_type": "code", "execution_count": 12, "id": "4c5198ca", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "attention.py encoder.py\t pipeline.py\t\t test.ipynb\n", "clip.py interface.py\t README.md\t\t training.ipynb\n", "ddpm.py merges.txt\t requirements.txt\t utils.py\n", "decoder.py model_converter.py sample_dataset\t VITON_Dataset.py\n", "diffusion.py model.py\t\t sd-v1-5-inpainting.ckpt vocab.json\n" ] } ], "source": [ "!ls" ] }, { "cell_type": "code", "execution_count": null, "id": "9041f108", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found existing installation: gdown 5.2.0\n", "Uninstalling gdown-5.2.0:\n", " Successfully uninstalled gdown-5.2.0\n" ] } ], "source": [ "# !pip uninstall gdown -y" ] }, { "cell_type": "code", "execution_count": null, "id": "a9c7b968", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Collecting gdown\n", " Downloading gdown-5.2.0-py3-none-any.whl.metadata (5.8 kB)\n", "Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.11/dist-packages (from gdown) (4.13.3)\n", "Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from gdown) (3.18.0)\n", "Requirement already satisfied: requests[socks] in /usr/local/lib/python3.11/dist-packages (from gdown) (2.32.3)\n", "Requirement already satisfied: tqdm in /usr/local/lib/python3.11/dist-packages (from gdown) (4.67.1)\n", "Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.11/dist-packages (from beautifulsoup4->gdown) (2.6)\n", "Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.11/dist-packages (from beautifulsoup4->gdown) (4.13.1)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests[socks]->gdown) (3.4.1)\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/dist-packages (from requests[socks]->gdown) (3.10)\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests[socks]->gdown) (2.3.0)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/dist-packages (from requests[socks]->gdown) (2025.1.31)\n", "Requirement already satisfied: PySocks!=1.5.7,>=1.5.6 in /usr/local/lib/python3.11/dist-packages (from requests[socks]->gdown) (1.7.1)\n", "Downloading gdown-5.2.0-py3-none-any.whl (18 kB)\n", "Installing collected packages: gdown\n", "Successfully installed gdown-5.2.0\n" ] } ], "source": [ "# !pip install -U --no-cache-dir gdown --pre" ] }, { "cell_type": "code", "execution_count": null, "id": "4467b7c7", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/usr/local/lib/python3.11/dist-packages/gdown/__main__.py:140: FutureWarning: Option `--id` was deprecated in version 4.3.1 and will be removed in 5.0. You don't need to pass it anymore to use a file ID.\n", " warnings.warn(\n", "Failed to retrieve file url:\n", "\n", "\tToo many users have viewed or downloaded this file recently. Please\n", "\ttry accessing the file again later. 
If the file you are trying to\n", "\taccess is particularly large or is shared with many people, it may\n", "\ttake up to 24 hours to be able to view or download the file. If you\n", "\tstill can't access a file after 24 hours, contact your domain\n", "\tadministrator.\n", "\n", "You may still be able to access the file from the browser:\n", "\n", "\thttps://drive.google.com/uc?id=1tLx8LRp-sxDp0EcYmYoV_vXdSc-jJ79w\n", "\n", "but Gdown can't. Please check connections and permissions.\n" ] } ], "source": [ "# !gdown --id 1tLx8LRp-sxDp0EcYmYoV_vXdSc-jJ79w\n" ] }, { "cell_type": "code", "execution_count": null, "id": "2653ceca", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "mkdir: cannot create directory ‘data’: File exists\n" ] } ], "source": [ "# !mkdir data\n", "# !mv test data\n", "# !mv train data\n", "# !mv test_pairs.txt data\n", "# !mv train_pairs.txt data" ] }, { "cell_type": "code", "execution_count": 56, "id": "a5d54cb4", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "agnostic_mask.png diffusion.py merges.txt\t requirements.txt\n", "attention.py\t dog.jpg\t model_converter.py sd-v1-5-inpainting.ckpt\n", "clip.py\t\t encoder.py\t model.py\t test.ipynb\n", "data\t\t garment.jpg\t person.jpg\t vocab.json\n", "ddpm.py\t\t image.png\t pipeline.py\t zalando-hd-resized.zip\n", "decoder.py\t interface.py README.md\n" ] } ], "source": [ "!ls" ] }, { "cell_type": "code", "execution_count": 57, "id": "f379e29c", "metadata": {}, "outputs": [], "source": [ "# cat data/train" ] }, { "cell_type": "code", "execution_count": 59, "id": "34cda0aa", "metadata": {}, "outputs": [], "source": [ "# !cat data/train_pairs.txt" ] }, { "cell_type": "code", "execution_count": 13, "id": "53095103", "metadata": {}, "outputs": [], "source": [ "!mkdir output\n", "!mkdir checkpoints" ] }, { "cell_type": "code", "execution_count": null, "id": "dcb8885d", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: diffusers in /usr/local/lib/python3.11/dist-packages (0.32.2)\n", "Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.11/dist-packages (from diffusers) (8.6.1)\n", "Requirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from diffusers) (3.18.0)\n", "Requirement already satisfied: huggingface-hub>=0.23.2 in /usr/local/lib/python3.11/dist-packages (from diffusers) (0.30.2)\n", "Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (from diffusers) (1.26.4)\n", "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.11/dist-packages (from diffusers) (2024.11.6)\n", "Requirement already satisfied: requests in /usr/local/lib/python3.11/dist-packages (from diffusers) (2.32.3)\n", "Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.11/dist-packages (from diffusers) (0.5.2)\n", "Requirement already satisfied: Pillow in /usr/local/lib/python3.11/dist-packages (from diffusers) (11.1.0)\n", "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.23.2->diffusers) (2024.12.0)\n", "Requirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.23.2->diffusers) (24.2)\n", "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.23.2->diffusers) (6.0.2)\n", "Requirement already satisfied: tqdm>=4.42.1 in 
/usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.23.2->diffusers) (4.67.1)\n", "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.11/dist-packages (from huggingface-hub>=0.23.2->diffusers) (4.13.1)\n", "Requirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.11/dist-packages (from importlib-metadata->diffusers) (3.21.0)\n", "Requirement already satisfied: mkl_fft in /usr/local/lib/python3.11/dist-packages (from numpy->diffusers) (1.3.8)\n", "Requirement already satisfied: mkl_random in /usr/local/lib/python3.11/dist-packages (from numpy->diffusers) (1.2.4)\n", "Requirement already satisfied: mkl_umath in /usr/local/lib/python3.11/dist-packages (from numpy->diffusers) (0.1.1)\n", "Requirement already satisfied: mkl in /usr/local/lib/python3.11/dist-packages (from numpy->diffusers) (2025.1.0)\n", "Requirement already satisfied: tbb4py in /usr/local/lib/python3.11/dist-packages (from numpy->diffusers) (2022.1.0)\n", "Requirement already satisfied: mkl-service in /usr/local/lib/python3.11/dist-packages (from numpy->diffusers) (2.4.1)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests->diffusers) (3.4.1)\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/dist-packages (from requests->diffusers) (3.10)\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/dist-packages (from requests->diffusers) (2.3.0)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/dist-packages (from requests->diffusers) (2025.1.31)\n", "Requirement already satisfied: intel-openmp<2026,>=2024 in /usr/local/lib/python3.11/dist-packages (from mkl->numpy->diffusers) (2024.2.0)\n", "Requirement already satisfied: tbb==2022.* in /usr/local/lib/python3.11/dist-packages (from mkl->numpy->diffusers) (2022.1.0)\n", "Requirement already satisfied: tcmlib==1.* in /usr/local/lib/python3.11/dist-packages (from tbb==2022.*->mkl->numpy->diffusers) (1.2.0)\n", "Requirement already satisfied: intel-cmplr-lib-rt in /usr/local/lib/python3.11/dist-packages (from mkl_umath->numpy->diffusers) (2024.2.0)\n", "Requirement already satisfied: intel-cmplr-lib-ur==2024.2.0 in /usr/local/lib/python3.11/dist-packages (from intel-openmp<2026,>=2024->mkl->numpy->diffusers) (2024.2.0)\n" ] } ], "source": [ "!pip install diffusers" ] }, { "cell_type": "code", "execution_count": 139, "id": "7efe325c", "metadata": {}, "outputs": [], "source": [ "import torch\n", "import gc\n", "\n", "# Delete all tensors and force garbage collection\n", "torch.cuda.empty_cache() # Clears unused memory\n", "gc.collect() # Python garbage collection\n", "\n", "# If you want to delete specific variables:\n", "for obj in dir():\n", " if 'cuda' in str(locals()[obj]):\n", " del locals()[obj]\n", "gc.collect()\n", "torch.cuda.empty_cache()\n" ] }, { "cell_type": "code", "execution_count": 140, "id": "a48f2753", "metadata": {}, "outputs": [ { "ename": "KeyError", "evalue": "'_oh'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/tmp/ipykernel_71/1017109895.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m 
\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mempty_cache\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Release unused GPU memory\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mgc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcollect\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Run Python garbage collector\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/IPython/core/displayhook.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, result)\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwrite_output_prompt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 262\u001b[0m \u001b[0mformat_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmd_dict\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute_format_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 263\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate_user_ns\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 264\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfill_exec_result\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 265\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mformat_dict\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/IPython/core/displayhook.py\u001b[0m in \u001b[0;36mupdate_user_ns\u001b[0;34m(self, result)\u001b[0m\n\u001b[1;32m 199\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 200\u001b[0m \u001b[0;31m# Avoid recursive reference when displaying _oh/Out\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 201\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcache_size\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshell\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muser_ns\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'_oh'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 202\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshell\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muser_ns\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'_oh'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcache_size\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdo_full_cache\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcull_cache\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mKeyError\u001b[0m: '_oh'" ] } ], "source": [ "import torch\n", "import gc\n", "\n", 
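"# Added sketch, not part of the original cell: empty_cache() only returns cached,\n", "# unreferenced blocks to the CUDA driver; tensors still bound to Python names keep\n", "# their memory. memory_allocated()/memory_reserved() are standard PyTorch calls\n", "# that make the effect of the cleanup below visible.\n", "if torch.cuda.is_available():\n", "    print(f\"allocated: {torch.cuda.memory_allocated() / 1024 ** 2:.1f} MB | \"\n", "          f\"reserved: {torch.cuda.memory_reserved() / 1024 ** 2:.1f} MB\")\n", "\n",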
"torch.cuda.empty_cache() # Release unused GPU memory\n", "gc.collect() # Run Python garbage collector" ] }, { "cell_type": "code", "execution_count": 141, "id": "5a57d765", "metadata": {}, "outputs": [], "source": [ "import torch\n", "import gc\n", "\n", "# Clear CUDA cache and collect garbage\n", "torch.cuda.empty_cache()\n", "gc.collect()\n", "\n", "# Delete all user-defined variables except for built-ins and modules\n", "for var in list(globals()):\n", " if not var.startswith(\"__\") and var not in [\"torch\", \"gc\"]:\n", " del globals()[var]\n", "\n", "gc.collect()\n", "torch.cuda.empty_cache()" ] }, { "cell_type": "code", "execution_count": 142, "id": "5957ec57", "metadata": {}, "outputs": [], "source": [ "import tensorflow as tf\n", "tf.keras.backend.clear_session()" ] }, { "cell_type": "code", "execution_count": 143, "id": "796e8ef7", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "GPU memory used: 17.12 MB / 15095.06 MB\n" ] } ], "source": [ "import torch\n", "\n", "if torch.cuda.is_available():\n", " used = torch.cuda.memory_allocated() / 1024 ** 2 # in MB\n", " total = torch.cuda.get_device_properties(0).total_memory / 1024 ** 2 # in MB\n", " print(f\"GPU memory used: {used:.2f} MB / {total:.2f} MB\")\n", "else:\n", " print(\"CUDA is not available.\")" ] }, { "cell_type": "code", "execution_count": 144, "id": "32ed173e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Total RAM: 31.35 GB\n", "Available RAM: 23.97 GB\n" ] } ], "source": [ "import psutil\n", "\n", "mem = psutil.virtual_memory()\n", "total_ram = mem.total / (1024 ** 3) # in GB\n", "available_ram = mem.available / (1024 ** 3) # in GB\n", "print(f\"Total RAM: {total_ram:.2f} GB\")\n", "print(f\"Available RAM: {available_ram:.2f} GB\")" ] }, { "cell_type": "code", "execution_count": null, "id": "d13441b5", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "3ce888b6", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "081c5b70", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2025-06-11 17:26:05.199950: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:477] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", "E0000 00:00:1749662765.402784 71 cuda_dnn.cc:8310] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "E0000 00:00:1749662765.463921 71 cuda_blas.cc:1418] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n" ] }, { "ename": "KeyboardInterrupt", "evalue": "", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", "\u001b[0;32m/tmp/ipykernel_71/1242232676.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mmodel\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpreload_models_from_standard_weights\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mddpm\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mDDPMSampler\u001b[0m 
\u001b[0;31m# Fixed import\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mutils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcheck_inputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcompute_vae_encodings\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mget_time_embedding\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprepare_image\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprepare_mask_image\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 24\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mdiffusers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtorch_utils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mrandn_tensor\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/kaggle/working/stable-diffusion/stable-diffusion/utils.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0maccelerate\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtyping\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mList\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mOptional\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTuple\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mSet\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mdiffusers\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mUNet2DConditionModel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mSchedulerMixin\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 13\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtqdm\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtqdm\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mPIL\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mImage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mImageFilter\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/lib/python3.11/importlib/_bootstrap.py\u001b[0m in \u001b[0;36m_handle_fromlist\u001b[0;34m(module, fromlist, import_, recursive)\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/utils/import_utils.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 909\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_class_to_module\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 910\u001b[0m \u001b[0mmodule\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_class_to_module\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 911\u001b[0;31m \u001b[0mvalue\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodule\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 912\u001b[0m 
\u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 913\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mAttributeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"module {self.__name__} has no attribute {name}\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/utils/import_utils.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 908\u001b[0m \u001b[0mvalue\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 909\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_class_to_module\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 910\u001b[0;31m \u001b[0mmodule\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_class_to_module\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 911\u001b[0m \u001b[0mvalue\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodule\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 912\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/utils/import_utils.py\u001b[0m in \u001b[0;36m_get_module\u001b[0;34m(self, module_name)\u001b[0m\n\u001b[1;32m 918\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_get_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodule_name\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 919\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 920\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mimportlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimport_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\".\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mmodule_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 921\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 922\u001b[0m raise RuntimeError(\n", "\u001b[0;32m/usr/lib/python3.11/importlib/__init__.py\u001b[0m in \u001b[0;36mimport_module\u001b[0;34m(name, package)\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 125\u001b[0m \u001b[0mlevel\u001b[0m \u001b[0;34m+=\u001b[0m 
\u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 126\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_bootstrap\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_gcd_import\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mlevel\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpackage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 127\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/models/unets/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mis_torch_available\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0munet_1d\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mUNet1DModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0munet_2d\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mUNet2DModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0munet_2d_condition\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mUNet2DConditionModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0munet_3d_condition\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mUNet3DConditionModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/models/unets/unet_2d.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0membeddings\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mGaussianFourierProjection\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTimestepEmbedding\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTimesteps\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodeling_utils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mModelMixin\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 24\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0munet_2d_blocks\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mUNetMidBlock2D\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mget_down_block\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mget_up_block\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 25\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/models/unets/unet_2d_blocks.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0mUpsample2D\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m )\n\u001b[0;32m---> 36\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtransformers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdual_transformer_2d\u001b[0m 
\u001b[0;32mimport\u001b[0m \u001b[0mDualTransformer2DModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 37\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtransformers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtransformer_2d\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mTransformer2DModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/models/transformers/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mis_torch_available\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mauraflow_transformer_2d\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mAuraFlowTransformer2DModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mcogvideox_transformer_3d\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mCogVideoXTransformer3DModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mdit_transformer_2d\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mDiTTransformer2DModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mdual_transformer_2d\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mDualTransformer2DModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/models/transformers/cogvideox_transformer_3d.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m...\u001b[0m\u001b[0mconfiguration_utils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mConfigMixin\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mregister_to_config\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 22\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m...\u001b[0m\u001b[0mloaders\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mPeftAdapterMixin\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 23\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m...\u001b[0m\u001b[0mutils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mUSE_PEFT_BACKEND\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mis_torch_version\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlogging\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mscale_lora_layers\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0munscale_lora_layers\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m...\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtorch_utils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmaybe_allow_in_graph\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/lib/python3.11/importlib/_bootstrap.py\u001b[0m in \u001b[0;36m_handle_fromlist\u001b[0;34m(module, fromlist, import_, recursive)\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/utils/import_utils.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, 
name)\u001b[0m\n\u001b[1;32m 908\u001b[0m \u001b[0mvalue\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 909\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_class_to_module\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 910\u001b[0;31m \u001b[0mmodule\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_class_to_module\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 911\u001b[0m \u001b[0mvalue\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodule\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 912\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/utils/import_utils.py\u001b[0m in \u001b[0;36m_get_module\u001b[0;34m(self, module_name)\u001b[0m\n\u001b[1;32m 918\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_get_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodule_name\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 919\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 920\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mimportlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimport_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\".\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mmodule_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 921\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 922\u001b[0m raise RuntimeError(\n", "\u001b[0;32m/usr/lib/python3.11/importlib/__init__.py\u001b[0m in \u001b[0;36mimport_module\u001b[0;34m(name, package)\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 125\u001b[0m \u001b[0mlevel\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 126\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_bootstrap\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_gcd_import\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mlevel\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpackage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 127\u001b[0m 
\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/loaders/peft.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 36\u001b[0m \u001b[0mset_weights_and_activate_adapters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 37\u001b[0m )\n\u001b[0;32m---> 38\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mlora_base\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0m_fetch_state_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_func_optionally_disable_offloading\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 39\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0munet_loader_utils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0m_maybe_expand_lora_scales\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/diffusers/loaders/lora_base.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 50\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mis_transformers_available\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 51\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtransformers\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mPreTrainedModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 52\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 53\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodels\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlora\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtext_encoder_attn_modules\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtext_encoder_mlp_modules\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/lib/python3.11/importlib/_bootstrap.py\u001b[0m in \u001b[0;36m_handle_fromlist\u001b[0;34m(module, fromlist, import_, recursive)\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/transformers/utils/import_utils.py\u001b[0m in \u001b[0;36m__getattr__\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 1953\u001b[0m \u001b[0mvalue\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mPlaceholder\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1954\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_class_to_module\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1955\u001b[0;31m \u001b[0mmodule\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_class_to_module\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1956\u001b[0m \u001b[0mvalue\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodule\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1957\u001b[0m 
\u001b[0;32melif\u001b[0m \u001b[0mname\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_modules\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/transformers/utils/import_utils.py\u001b[0m in \u001b[0;36m_get_module\u001b[0;34m(self, module_name)\u001b[0m\n\u001b[1;32m 1965\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_get_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodule_name\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1966\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1967\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mimportlib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimport_module\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\".\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mmodule_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1968\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1969\u001b[0m raise RuntimeError(\n", "\u001b[0;32m/usr/lib/python3.11/importlib/__init__.py\u001b[0m in \u001b[0;36mimport_module\u001b[0;34m(name, package)\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 125\u001b[0m \u001b[0mlevel\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 126\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_bootstrap\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_gcd_import\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mlevel\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpackage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlevel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 127\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/transformers/modeling_utils.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 67\u001b[0m \u001b[0mshard_and_distribute_module\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 68\u001b[0m )\n\u001b[0;32m---> 69\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss_utils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mLOSS_MAPPING\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 70\u001b[0m from .pytorch_utils import ( # noqa: F401\n\u001b[1;32m 71\u001b[0m \u001b[0mConv1D\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/transformers/loss/loss_utils.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnn\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0mBCEWithLogitsLoss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mMSELoss\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 21\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mloss_deformable_detr\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mDeformableDetrForObjectDetectionLoss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mDeformableDetrForSegmentationLoss\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mloss_for_object_detection\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mForObjectDetectionLoss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mForSegmentationLoss\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0mloss_grounding_dino\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mGroundingDinoForObjectDetectionLoss\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/transformers/loss/loss_deformable_detr.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnn\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mnn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimage_transforms\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcenter_to_corners_format\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mis_scipy_available\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m from .loss_for_object_detection import (\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/transformers/image_transforms.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mis_tf_available\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 47\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mtensorflow\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 48\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mis_flax_available\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 47\u001b[0m \u001b[0m_tf2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 49\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0m__internal__\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 50\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0m__operators__\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0maudio\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/_api/v2/__internal__/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__internal__\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdecorator\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__internal__\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdispatch\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__internal__\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdistribute\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__internal__\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0meager_context\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__internal__\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mfeature_column\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/_api/v2/__internal__/distribute/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__internal__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcombinations\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__internal__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0minterim\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_api\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv2\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__internal__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmulti_process_runner\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/_api/v2/__internal__/distribute/combinations/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0msys\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0m_sys\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcombinations\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0menv\u001b[0m \u001b[0;31m# line: 456\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcombinations\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mgenerate\u001b[0m \u001b[0;31m# line: 365\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcombinations\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0min_main_process\u001b[0m \u001b[0;31m# line: 418\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/distribute/combinations.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclient\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0msession\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 33\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcollective_all_reduce_strategy\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 34\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdistribute_lib\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmulti_process_runner\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/distribute/collective_all_reduce_strategy.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 
23\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprotobuf\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtensorflow_server_pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcollective_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 25\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcross_device_ops\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mcross_device_ops_lib\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 26\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcross_device_utils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdevice_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/distribute/cross_device_ops.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclient\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdevice_lib\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcollective_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 28\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcross_device_utils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 29\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdevice_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdistribute_utils\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/distribute/cross_device_utils.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0mcollective_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 22\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mvalues\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mvalue_lib\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 23\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0meager\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mbackprop_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0meager\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/distribute/values.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprotobuf\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mstruct_pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdevice_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdistribute_lib\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 24\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpacked_distributed_variable\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mpacked\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mreduce_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/distribute/distribute_lib.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograph\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mag_ctx\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mautograph_ctx\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograph\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimpl\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mapi\u001b[0m \u001b[0;32mas\u001b[0m 
\u001b[0mautograph\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 205\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdataset_ops\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 206\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcollective_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 207\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdistribute\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdevice_util\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;31m# pylint: disable=unused-import\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 21\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mexperimental\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdataset_ops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mAUTOTUNE\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdataset_ops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mDataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/experimental/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 97\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[0;31m# pylint: disable=unused-import\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 99\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mservice\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 100\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbatching\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdense_to_ragged_batch\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 101\u001b[0m \u001b[0;32mfrom\u001b[0m 
\u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbatching\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdense_to_sparse_batch\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/experimental/service/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 417\u001b[0m \"\"\"\n\u001b[1;32m 418\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 419\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata_service_ops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdistribute\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 420\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata_service_ops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mfrom_dataset_id\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 421\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata_service_ops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mregister_dataset\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/experimental/ops/data_service_ops.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprotobuf\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdata_service_pb2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtf2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 23\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcompression_ops\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 24\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mservice\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0m_pywrap_server_lib\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;32mfrom\u001b[0m 
\u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mservice\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0m_pywrap_utils_exp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/experimental/ops/compression_ops.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# ==============================================================================\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0;34m\"\"\"Ops for compressing and uncompressing dataset elements.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutil\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mstructure\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mgen_experimental_dataset_ops\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mged_ops\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/util/structure.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mresource_variable_ops\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtensor_array_ops\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 32\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mragged\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mragged_tensor\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 33\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplatform\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtf_logging\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mlogging\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtypes\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0minternal\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/ragged/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0mAPI\u001b[0m \u001b[0mdocstring\u001b[0m\u001b[0;34m:\u001b[0m 
\u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mragged\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 27\u001b[0m \"\"\"\n\u001b[0;32m---> 28\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mragged\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mragged_tensor\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/ragged/ragged_tensor.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 3147\u001b[0m \u001b[0;31m# are registered. Ragged ops import RaggedTensor, so import at bottom of the\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3148\u001b[0m \u001b[0;31m# file to avoid a partially-initialized module error.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 3149\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mragged\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mragged_ops\u001b[0m \u001b[0;31m# pylint: disable=unused-import, g-bad-import-order, g-import-not-at-top\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/ragged/ragged_ops.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;31m# pylint: disable=unused-import\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 27\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mragged\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mragged_array_ops\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 28\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mragged\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mragged_autograph\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mragged\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mragged_batch_gather_ops\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mKeyboardInterrupt\u001b[0m: " ] } ], "source": [ "import os\n", "import json\n", "import random\n", "import argparse\n", "from pathlib import Path\n", "from typing import Dict, List, Tuple, Optional\n", "\n", "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "from torch.utils.data import Dataset, DataLoader\n", "from torch.optim import AdamW\n", "from torch.optim.lr_scheduler import CosineAnnealingLR\n", "\n", "import numpy as np\n", "from PIL import Image\n", "from tqdm import tqdm\n", "from VITON_Dataset import VITONHDTestDataset\n", "\n", "# Import your custom modules\n", "from model import preload_models_from_standard_weights\n", "from ddpm import DDPMSampler # Fixed import\n", "from utils import check_inputs, 
compute_vae_encodings, get_time_embedding, prepare_image, prepare_mask_image\n", "from diffusers.utils.torch_utils import randn_tensor\n", "\n", "class CatVTONTrainer:\n", " \"\"\"CatVTON Training Class with PEFT, CFG and DREAM support\"\"\"\n", " \n", " def __init__(\n", " self,\n", " models: Dict[str, nn.Module],\n", " train_dataloader: DataLoader,\n", " val_dataloader: Optional[DataLoader] = None,\n", " device: str = \"cuda\",\n", " learning_rate: float = 1e-5, # Updated to paper value\n", " num_epochs: int = 100,\n", " save_steps: int = 1000,\n", " output_dir: str = \"./checkpoints\",\n", " cfg_dropout_prob: float = 0.1,\n", " guidance_scale: float = 2.5,\n", " num_inference_steps: int = 50,\n", " gradient_accumulation_steps: int = 1,\n", " max_grad_norm: float = 1.0,\n", " use_peft: bool = True,\n", " dream_lambda: float = 10.0, # DREAM parameter\n", " resume_from_checkpoint: Optional[str] = None,\n", " use_mixed_precision: bool = True, # For memory optimization\n", " height=512,\n", " width=512,\n", " ):\n", " self.training = True\n", " self.models = models\n", " self.train_dataloader = train_dataloader\n", " self.val_dataloader = val_dataloader\n", " self.device = device\n", " self.learning_rate = learning_rate\n", " self.num_epochs = num_epochs\n", " self.save_steps = save_steps\n", " self.output_dir = Path(output_dir)\n", " self.cfg_dropout_prob = cfg_dropout_prob\n", " self.guidance_scale = guidance_scale\n", " self.num_inference_steps = num_inference_steps\n", " self.gradient_accumulation_steps = gradient_accumulation_steps\n", " self.max_grad_norm = max_grad_norm\n", " self.use_peft = use_peft\n", " self.dream_lambda = dream_lambda\n", " self.use_mixed_precision = use_mixed_precision\n", " self.height=height\n", " self.width=width\n", "\n", " self.encoder=self.models.get('encoder', None)\n", " self.decoder=self.models.get('decoder', None)\n", " self.diffusion=self.models.get('diffusion', None)\n", "\n", " self.generator = torch.Generator(device=device)\n", " \n", " # Create output directory\n", " self.output_dir.mkdir(parents=True, exist_ok=True)\n", " \n", " # Setup mixed precision training\n", " if self.use_mixed_precision:\n", " self.scaler = torch.cuda.amp.GradScaler()\n", "\n", " self.weight_dtype = torch.float16 if use_mixed_precision else torch.float32\n", " \n", " # Setup models and optimizers\n", " self._setup_training()\n", " \n", " # Initialize scheduler and sampler\n", " self.scheduler = DDPMSampler(self.generator, num_training_steps=1000)\n", " \n", " # Resume from checkpoint if provided\n", " self.global_step = 0\n", " self.current_epoch = 0\n", " if resume_from_checkpoint:\n", " self._load_checkpoint(resume_from_checkpoint)\n", " \n", " def _setup_training(self):\n", " \"\"\"Setup models for training with PEFT\"\"\"\n", " # Move models to device with mixed precision\n", " for name, model in self.models.items():\n", " model.to(self.device)\n", " if self.use_mixed_precision and name != 'encoder': # Keep encoder in float32 for stability\n", " model.half()\n", " \n", " # Freeze all parameters first\n", " for model in self.models.values():\n", " for param in model.parameters():\n", " param.requires_grad = False\n", " \n", " # Enable training for specific layers based on PEFT strategy\n", " if self.use_peft:\n", " self._enable_peft_training()\n", " else:\n", " # Enable full training for diffusion model\n", " for param in self.models['diffusion'].parameters():\n", " param.requires_grad = True\n", " \n", " # Collect trainable parameters\n", " trainable_params = []\n", " 
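# Count parameters across all sub-models; with PEFT enabled, only the UNet\n",
"        # self-attention blocks switched on in _enable_peft_training() stay trainable.\n",
"        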
total_params = 0\n", " trainable_count = 0\n", " \n", " for name, model in self.models.items():\n", " for param_name, param in model.named_parameters():\n", " total_params += param.numel()\n", " if param.requires_grad:\n", " trainable_params.append(param)\n", " trainable_count += param.numel()\n", " \n", " print(f\"Total parameters: {total_params:,}\")\n", " print(f\"Trainable parameters: {trainable_count:,} ({trainable_count/total_params*100:.2f}%)\")\n", " \n", " # Verify we're close to the paper's 49.57M parameters for self-attention only\n", " if self.use_peft:\n", " expected_params = 49_570_000 # 49.57M\n", " if abs(trainable_count - expected_params) > 5_000_000: # 5M tolerance\n", " print(f\"Warning: Expected ~{expected_params:,} trainable parameters, got {trainable_count:,}\")\n", " \n", " # Setup optimizer - AdamW as per paper\n", " self.optimizer = AdamW(\n", " trainable_params,\n", " lr=self.learning_rate,\n", " betas=(0.9, 0.999),\n", " weight_decay=1e-2,\n", " eps=1e-8\n", " )\n", " \n", " # Setup learning rate scheduler (constant as per paper)\n", " # For constant LR, we can use a dummy scheduler\n", " self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(\n", " self.optimizer, lr_lambda=lambda epoch: 1.0\n", " )\n", " \n", " def _enable_peft_training(self):\n", " \"\"\"Enable PEFT training - only self-attention layers (49.57M parameters)\"\"\"\n", " print(\"Enabling PEFT training (self-attention layers only)\")\n", " \n", " unet = self.diffusion.unet\n", " \n", " # Enable attention layers in encoders\n", " for layers in [unet.encoders, unet.decoders]:\n", " for layer in layers:\n", " if hasattr(layer, 'attention'): # UNET_AttentionBlock\n", " for param in layer.attention.parameters():\n", " param.requires_grad = True\n", " elif hasattr(layer, 'attention_1'): # Alternative naming\n", " for param in layer.attention_1.parameters():\n", " param.requires_grad = True\n", " \n", " # Enable attention layers in bottleneck\n", " for layer in unet.bottleneck:\n", " if hasattr(layer, 'attention'):\n", " for param in layer.attention.parameters():\n", " param.requires_grad = True\n", " elif hasattr(layer, 'attention_1'):\n", " for param in layer.attention_1.parameters():\n", " param.requires_grad = True\n", " \n", " def _apply_cfg_dropout(self, garment_latent: torch.Tensor) -> torch.Tensor:\n", " \"\"\"Apply classifier-free guidance dropout (10% chance)\"\"\"\n", " if self.training and random.random() < self.cfg_dropout_prob:\n", " # Replace with zero tensor for unconditional generation\n", " return torch.zeros_like(garment_latent)\n", " return garment_latent\n", " \n", " def compute_loss(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:\n", " \"\"\"Compute MSE loss for denoising with DREAM strategy\"\"\"\n", " person_images = batch['person'].to(self.device)\n", " cloth_images = batch['cloth'].to(self.device)\n", " masks = batch['mask'].to(self.device)\n", " \n", " batch_size = person_images.shape[0]\n", "\n", " concat_dim = -2 # FIXME: y axis concat\n", " # Prepare inputs to Tensor\n", " image, condition_image, mask = check_inputs(person_images, cloth_images, masks, self.width, self.height)\n", " image = prepare_image(person_images).to(self.device, dtype=self.weight_dtype)\n", " condition_image = prepare_image(cloth_images).to(self.device, dtype=self.weight_dtype)\n", " mask = prepare_mask_image(masks).to(self.device, dtype=self.weight_dtype)\n", " # Mask image\n", " masked_image = image * (mask < 0.5)\n", "\n", " with torch.cuda.amp.autocast(enabled=self.use_mixed_precision):\n", " 
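# CatVTON conditioning layout: person and garment are treated as one latent by\n",
"            # concatenating them along the spatial axis (concat_dim = -2, i.e. stacked along height);\n",
"            # the UNet then receives the usual inpainting channel concat\n",
"            # [noisy latent, resized mask, masked-image latent] built further below.\n",
"            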
# VAE encoding\n", "            masked_latent = compute_vae_encodings(masked_image, self.encoder)\n", "            condition_latent = compute_vae_encodings(condition_image, self.encoder)\n",
"            image_latent = compute_vae_encodings(image, self.encoder)  # ground-truth person latent\n",
"            mask_latent = torch.nn.functional.interpolate(mask, size=masked_latent.shape[-2:], mode=\"nearest\")\n", "            del image, mask, condition_image\n", "\n",
"            # Denoising target: the un-masked person latent next to the garment latent\n",
"            target_latents = torch.cat([image_latent, condition_latent], dim=concat_dim)\n", "\n",
"            # Apply CFG dropout to the garment conditioning (the target keeps the real garment latent)\n",
"            condition_latent = self._apply_cfg_dropout(condition_latent)\n", "\n",
"            # Concatenate latents for the inpainting-style conditioning\n",
"            masked_latent_concat = torch.cat([masked_latent, condition_latent], dim=concat_dim)\n", "            mask_latent_concat = torch.cat([mask_latent, torch.zeros_like(mask_latent)], dim=concat_dim)\n", "\n",
"            noise = randn_tensor(\n", "                target_latents.shape,\n", "                generator=self.generator,\n", "                device=target_latents.device,\n", "                dtype=self.weight_dtype,\n", "            )\n", "\n",
"            timesteps = torch.randint(1, 1000, size=(1,))[0].long().item()\n", "\n", "            timesteps_embedding = get_time_embedding(timesteps)\n", "\n",
"            # Add noise to latents\n", "            noisy_latents = self.scheduler.add_noise(target_latents, timesteps, noise)\n", "\n",
"            non_inpainting_latent_model_input = noisy_latents\n", "            inpainting_latent_model_input = torch.cat([\n", "                non_inpainting_latent_model_input,\n", "                mask_latent_concat,\n", "                masked_latent_concat\n", "            ], dim=1).to(self.device, dtype=self.weight_dtype)\n", "\n",
"            # DREAM strategy implementation\n", "            if self.dream_lambda > 0:\n", "                # Get initial noise prediction\n", "                with torch.no_grad():\n", "                    epsilon_theta = self.diffusion(\n", "                        inpainting_latent_model_input,\n", "                        timesteps_embedding\n", "                    )\n", "\n",
"                # Apply DREAM: ẑt = √ᾱt*z0 + √(1-ᾱt)*(ε + λ*εθ)\n", "                alphas_cumprod = self.scheduler.alphas_cumprod.to(device=self.device, dtype=self.weight_dtype)\n", "                sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n", "                sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n", "\n",
"                # Reshape for broadcasting\n", "                sqrt_alpha_prod = sqrt_alpha_prod.view(-1, 1, 1, 1)\n", "                sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.view(-1, 1, 1, 1)\n", "\n",
"                # DREAM noise combination\n", "                dream_noise = noise + self.dream_lambda * epsilon_theta\n", "\n", "                dream_noisy_latents = sqrt_alpha_prod * target_latents + sqrt_one_minus_alpha_prod * dream_noise\n", "\n",
"                dream_model_input = torch.cat([\n", "                    dream_noisy_latents,\n", "                    mask_latent_concat,\n", "                    masked_latent_concat\n", "                ], dim=1)\n", "\n",
"                predicted_noise = self.diffusion(\n", "                    dream_model_input,\n", "                    timesteps_embedding\n", "                )\n", "                # DREAM loss: |(ε + λεθ) - εθ(ẑt, t)|²\n", "                loss = F.mse_loss(predicted_noise, dream_noise)\n",
"            else:\n", "                # Standard training without DREAM\n", "                predicted_noise = self.diffusion(\n", "                    inpainting_latent_model_input,\n", "                    timesteps_embedding,\n", "                )\n", "\n", "                # Standard MSE loss\n", "                loss = F.mse_loss(predicted_noise, noise)\n", "\n",
"        return loss\n", "\n",
"    def train_epoch(self) -> float:\n", "        \"\"\"Train for one epoch\"\"\"\n", "        self.models['diffusion'].train()\n", "        total_loss = 0.0\n", "        num_batches = len(self.train_dataloader)\n", "\n",
"        progress_bar = tqdm(self.train_dataloader, desc=f\"Epoch {self.current_epoch+1}\")\n", "\n",
"        for step, batch in enumerate(progress_bar):\n", "            # Compute loss with mixed precision\n", 
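"            # (autocast + GradScaler when enabled); gradients are accumulated over\n",
"            # gradient_accumulation_steps batches before each optimizer step below.\n",
"            if 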
self.use_mixed_precision:\n", " with torch.cuda.amp.autocast():\n", " loss = self.compute_loss(batch)\n", " \n", " # Scale loss for gradient accumulation\n", " loss = loss / self.gradient_accumulation_steps\n", " \n", " # Backward pass with scaling\n", " self.scaler.scale(loss).backward()\n", " else:\n", " loss = self.compute_loss(batch)\n", " loss = loss / self.gradient_accumulation_steps\n", " loss.backward()\n", " \n", " # Gradient accumulation\n", " if (step + 1) % self.gradient_accumulation_steps == 0:\n", " if self.use_mixed_precision:\n", " # Unscale gradients and clip\n", " self.scaler.unscale_(self.optimizer)\n", " torch.nn.utils.clip_grad_norm_(\n", " [p for p in self.models['diffusion'].parameters() if p.requires_grad],\n", " self.max_grad_norm\n", " )\n", " \n", " # Optimizer step with scaling\n", " self.scaler.step(self.optimizer)\n", " self.scaler.update()\n", " else:\n", " # Clip gradients\n", " torch.nn.utils.clip_grad_norm_(\n", " [p for p in self.models['diffusion'].parameters() if p.requires_grad],\n", " self.max_grad_norm\n", " )\n", " self.optimizer.step()\n", " \n", " self.lr_scheduler.step()\n", " self.optimizer.zero_grad()\n", " self.global_step += 1\n", " \n", " total_loss += loss.item() * self.gradient_accumulation_steps\n", " \n", " # Update progress bar\n", " progress_bar.set_postfix({\n", " 'loss': loss.item() * self.gradient_accumulation_steps,\n", " 'lr': self.optimizer.param_groups[0]['lr'],\n", " 'step': self.global_step\n", " })\n", " \n", " # Save checkpoint\n", " if self.global_step % self.save_steps == 0:\n", " self._save_checkpoint()\n", " \n", " # Clear cache periodically to prevent OOM\n", " if step % 50 == 0:\n", " torch.cuda.empty_cache()\n", " \n", " return total_loss / num_batches\n", " \n", " def train(self):\n", " \"\"\"Main training loop\"\"\"\n", " print(f\"Starting training for {self.num_epochs} epochs\")\n", " print(f\"Total training steps: {self.num_epochs * len(self.train_dataloader)}\")\n", " print(f\"Using DREAM with lambda = {self.dream_lambda}\")\n", " print(f\"Mixed precision: {self.use_mixed_precision}\")\n", " \n", " \n", " for epoch in range(self.current_epoch, self.num_epochs):\n", " self.current_epoch = epoch\n", " \n", " # Train\n", " train_loss = self.train_epoch()\n", " \n", " print(f\"Epoch {epoch+1}/{self.num_epochs}\")\n", " print(f\"Train Loss: {train_loss:.6f}\")\n", " \n", " # Save epoch checkpoint\n", " if (epoch + 1) % 10 == 0:\n", " self._save_checkpoint(epoch_checkpoint=True)\n", " \n", " # Clear cache at end of epoch\n", " torch.cuda.empty_cache()\n", " \n", " def _save_checkpoint(self, is_best: bool = False, epoch_checkpoint: bool = False):\n", " \"\"\"Save model checkpoint\"\"\"\n", " checkpoint = {\n", " 'global_step': self.global_step,\n", " 'current_epoch': self.current_epoch,\n", " 'diffusion_state_dict': self.models['diffusion'].state_dict(),\n", " 'optimizer_state_dict': self.optimizer.state_dict(),\n", " 'lr_scheduler_state_dict': self.lr_scheduler.state_dict(),\n", " 'dream_lambda': self.dream_lambda,\n", " 'use_peft': self.use_peft,\n", " }\n", " \n", " if self.use_mixed_precision:\n", " checkpoint['scaler_state_dict'] = self.scaler.state_dict()\n", " \n", " if is_best:\n", " checkpoint_path = self.output_dir / \"best_model.pth\"\n", " elif epoch_checkpoint:\n", " checkpoint_path = self.output_dir / f\"checkpoint_epoch_{self.current_epoch+1}.pth\"\n", " else:\n", " checkpoint_path = self.output_dir / f\"checkpoint_step_{self.global_step}.pth\"\n", " \n", " torch.save(checkpoint, checkpoint_path)\n", " 
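# Note: only the diffusion (UNet) weights are checkpointed here; the frozen VAE\n",
"        # encoder/decoder are reloaded from the base checkpoint when resuming.\n",
"        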
print(f\"Checkpoint saved: {checkpoint_path}\")\n", " \n", " def _load_checkpoint(self, checkpoint_path: str):\n", " \"\"\"Load model checkpoint\"\"\"\n", " checkpoint = torch.load(checkpoint_path, map_location=self.device)\n", " \n", " self.global_step = checkpoint['global_step']\n", " self.current_epoch = checkpoint['current_epoch']\n", " self.dream_lambda = checkpoint.get('dream_lambda', 10.0)\n", " \n", " self.models['diffusion'].load_state_dict(checkpoint['diffusion_state_dict'])\n", " self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n", " self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])\n", " \n", " if self.use_mixed_precision and 'scaler_state_dict' in checkpoint:\n", " self.scaler.load_state_dict(checkpoint['scaler_state_dict'])\n", " \n", " print(f\"Checkpoint loaded: {checkpoint_path}\")\n", " print(f\"Resuming from epoch {self.current_epoch}, step {self.global_step}\")\n", "\n", "\n", "def create_dataloaders(args) -> Tuple[DataLoader, Optional[DataLoader]]:\n", " \"\"\"Create training and validation dataloaders\"\"\"\n", " # Dataset\n", " if args.dataset_name == \"vitonhd\":\n", " dataset = VITONHDTestDataset(args)\n", " else:\n", " raise ValueError(f\"Invalid dataset name {args.dataset}.\")\n", " print(f\"Dataset {args.dataset_name} loaded, total {len(dataset)} pairs.\")\n", " dataloader = DataLoader(\n", " dataset,\n", " batch_size=args.batch_size,\n", " shuffle=False,\n", " num_workers=args.dataloader_num_workers\n", " )\n", " \n", " return dataloader\n", "\n", "\n", "def main():\n", " args=argparse.Namespace()\n", " args.__dict__= {\n", " \"base_model_path\": \"inkpunk-diffusion-v1.ckpt\",\n", " \"resume_path\": \"zhengchong/CatVTON\",\n", " \"dataset_name\": \"vitonhd\",\n", " \"data_root_path\": \"/kaggle/input/viton-hd-dataset\",\n", " \"output_dir\": \"./output\",\n", " \"seed\": 42,\n", " \"batch_size\": 2,\n", " \"num_inference_steps\": 50,\n", " \"guidance_scale\": 2.5,\n", " \"width\": 384,\n", " \"height\": 512,\n", " \"repaint\": True,\n", " \"eval_pair\": False,\n", " \"concat_eval_results\": True,\n", " \"allow_tf32\": True,\n", " \"dataloader_num_workers\": 4,\n", " \"mixed_precision\": 'no',\n", " \"concat_axis\": 'y',\n", " \"enable_condition_noise\": True,\n", " \"device\":\"cuda\",\n", " \"num_training_steps\": 16000,\n", " \"learning_rate\": 1e-5,\n", " \"gradient_accumulation_steps\": 128, # Simulate batch size 128\n", " \"max_grad_norm\": 1.0,\n", " \"use_peft\": True,\n", " \"cfg_dropout_prob\": 0.1,\n", " \"dream_lambda\": 10.0,\n", " \"use_mixed_precision\": True,\n", " \"output_dir\": \"./checkpoints\",\n", " \"save_steps\": 1000,\n", " \"resume_from_checkpoint\": None,\n", " \"is_train\": True\n", " }\n", " \n", " # Calculate epochs from training steps\n", " # This will be calculated after dataloader creation\n", " \n", " # Set random seeds\n", " torch.manual_seed(args.seed)\n", " np.random.seed(args.seed)\n", " random.seed(args.seed)\n", " if torch.cuda.is_available():\n", " torch.cuda.manual_seed_all(args.seed)\n", " \n", " # Optimize CUDA settings for memory\n", " torch.backends.cudnn.benchmark = True\n", " torch.backends.cuda.matmul.allow_tf32 = True \n", " torch.set_float32_matmul_precision(\"high\")\n", "\n", " # Load pretrained models\n", " print(\"Loading pretrained models...\")\n", " models = preload_models_from_standard_weights(args.base_model_path, args.device)\n", " \n", " # Create dataloaders\n", " print(\"Creating dataloaders...\")\n", " train_dataloader = create_dataloaders(args)\n", " \n", " # 
Calculate epochs from training steps\n", " steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps\n", " num_epochs = (args.num_training_steps + steps_per_epoch - 1) // steps_per_epoch\n", " print(f\"Training for {num_epochs} epochs ({args.num_training_steps} steps)\")\n", " args.num_epochs = num_epochs\n", " print(f\"Steps per epoch: {steps_per_epoch}\")\n", " print(f\"Total training steps: {args.num_training_steps}\")\n", " print(f\"Total epochs: {num_epochs}\")\n", " # Initialize trainer\n", " print(\"Initializing trainer...\") \n", " trainer = CatVTONTrainer(\n", " models=models,\n", " train_dataloader=train_dataloader,\n", " device=args.device,\n", " learning_rate=args.learning_rate,\n", " num_epochs=args.num_epochs,\n", " save_steps=args.save_steps,\n", " output_dir=args.output_dir,\n", " cfg_dropout_prob=args.cfg_dropout_prob,\n", " guidance_scale=args.guidance_scale,\n", " num_inference_steps=50, # Fixed as per paper\n", " gradient_accumulation_steps=args.gradient_accumulation_steps,\n", " max_grad_norm=args.max_grad_norm,\n", " use_peft=args.use_peft,\n", " dream_lambda=args.dream_lambda,\n", " resume_from_checkpoint=args.resume_from_checkpoint,\n", " use_mixed_precision=args.use_mixed_precision\n", " )\n", " # Start training\n", " print(\"Starting training...\")\n", " trainer.train() \n", "\n", "if __name__ == \"__main__\":\n", " main()\n" ] }, { "cell_type": "code", "execution_count": null, "id": "2eff454d", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "2eefd6bc", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.11" } }, "nbformat": 4, "nbformat_minor": 5 }