---
# Inference configuration for the ViViD try-on pipeline.
# Reconstructed into valid block YAML (the prior version had its line
# structure collapsed, which is not parseable).

# Frozen pretrained backbones.
pretrained_base_model_path: "ckpts/sd-image-variations-diffusers"
pretrained_vae_path: "ckpts/sd-vae-ft-mse"
image_encoder_path: "ckpts/sd-image-variations-diffusers/image_encoder"

# pretrained — released ViViD checkpoints (disabled; see reproduction run below)
# denoising_unet_path: "ckpts/ViViD/denoising_unet.pth"
# reference_unet_path: "ckpts/ViViD/reference_unet.pth"
# pose_guider_path: "ckpts/ViViD/pose_guider.pth"

# Reproduction run (originally labeled "fuxian", i.e. 复现 / "reproduce"):
# stage-1 checkpoints at step 100 from a local training run.
denoising_unet_path: "/mnt/lpai-dione/ssai/cvg/team/wjj/ViViD/vividfuxian1210/stage1/denoising_unet-100.pth"
reference_unet_path: "/mnt/lpai-dione/ssai/cvg/team/wjj/ViViD/vividfuxian1210/stage1/reference_unet-100.pth"
pose_guider_path: "/mnt/lpai-dione/ssai/cvg/team/wjj/ViViD/vividfuxian1210/stage1/pose_guider-100.pth"

# Temporal motion module (AnimateDiff-style weights).
motion_module_path: "ckpts/MotionModule/mm_sd_v15_v2.ckpt"

inference_config: "./configs/inference/inference.yaml"
weight_dtype: 'fp16'

# NOTE(review): presumably L is the sampled clip length in frames — confirm
# against the inference script that reads this config.
L: 36
seed: 52

# Input videos to run try-on over (one entry per video).
model_video_paths:
  - "/mnt/lpai-dione/ssai/cvg/team/wjj/ViViD/dataset/ViViD/dresses/videos/803128_detail.mp4"

# Garment images, paired by position with model_video_paths.
cloth_image_paths:
  - "/mnt/lpai-dione/ssai/cvg/team/wjj/ViViD/dataset/ViViD/dresses/images/812294_in_xl.jpg"