ford442 committed
Commit c456413 · verified · 1 Parent(s): afb5890

Update app.py

Files changed (1): app.py +2 -2
app.py CHANGED
@@ -190,9 +190,10 @@ def generate(
     state_file = f"rv_L_{segment-1}_{seed}.pt"
     state = torch.load(state_file, weights_only=False)
     # # TEST
+    #seed = state["seed"]
+    generator = torch.Generator(device='cuda').manual_seed(seed)
     latents = state["intermediate_latents"].to("cuda") #, dtype=torch.bfloat16)
     guidance_scale = state["guidance_scale"]
-    seed = state["seed"]
     all_timesteps_cpu = state["all_timesteps"]
     height = state["height"]
     width = state["width"]
@@ -200,7 +201,6 @@ def generate(
     timesteps_split_np = np.array_split(all_timesteps_cpu.numpy(), 2)
     timesteps_split_for_state = [chunk for chunk in timesteps_split_np]
     segment_timesteps = torch.from_numpy(timesteps_split_np[segment - 1]).to("cuda")
-    generator = torch.Generator(device='cuda').manual_seed(seed)
     prompt_embeds = state["prompt_embeds"].to("cuda", dtype=torch.bfloat16)
     negative_prompt_embeds = state["negative_prompt_embeds"].to("cuda", dtype=torch.bfloat16)
     pooled_prompt_embeds = state["pooled_prompt_embeds"].to("cuda", dtype=torch.bfloat16)
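
For context, a minimal, runnable sketch of the save/resume pattern this commit adjusts: segment 1 checkpoints its intermediate state, and segment 2 reloads it while seeding the generator from the caller-supplied seed (the commented-out `state["seed"]` line in the diff is the old behavior). The helper names `save_state`/`resume_segment` and the CPU fallback are assumptions for illustration, not the actual app.py API.

```python
import numpy as np
import torch

# Fall back to CPU so the sketch runs anywhere; app.py itself targets "cuda".
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

def save_state(path, latents, all_timesteps, seed, guidance_scale):
    # Hypothetical segment-1 helper: checkpoint everything segment 2 needs.
    # (The real app.py also stores prompt embeddings, height, width, etc.)
    torch.save(
        {
            "intermediate_latents": latents.cpu(),
            "all_timesteps": all_timesteps.cpu(),
            "seed": seed,
            "guidance_scale": guidance_scale,
        },
        path,
    )

def resume_segment(path, segment, seed):
    # Hypothetical segment-2 helper: reload the checkpoint and continue.
    state = torch.load(path, weights_only=False)

    # As of this commit: seed the generator from the caller-supplied `seed`
    # right after loading, instead of first overwriting it with state["seed"].
    generator = torch.Generator(device=DEVICE).manual_seed(seed)

    latents = state["intermediate_latents"].to(DEVICE)
    all_timesteps_cpu = state["all_timesteps"]

    # Split the full schedule in two and take this segment's half,
    # mirroring the np.array_split logic in app.py.
    timesteps_split_np = np.array_split(all_timesteps_cpu.numpy(), 2)
    segment_timesteps = torch.from_numpy(timesteps_split_np[segment - 1]).to(DEVICE)

    return latents, segment_timesteps, generator

if __name__ == "__main__":
    seed = 1234
    latents = torch.randn(1, 4, 64, 64)
    save_state(f"rv_L_1_{seed}.pt", latents, torch.linspace(999.0, 0.0, 50), seed, 7.0)
    latents, ts, gen = resume_segment(f"rv_L_1_{seed}.pt", segment=2, seed=seed)
    print(latents.shape, ts.shape)  # torch.Size([1, 4, 64, 64]) torch.Size([25])
```

A plausible motivation, reading the diff: the checkpoint filename `rv_L_{segment-1}_{seed}.pt` is built from the `seed` argument, so restoring `seed` from `state["seed"]` afterwards could leave the generator seeded differently from the file that was loaded; seeding from the argument keeps the two in agreement.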