Fabrice-TIERCELIN committed (verified)
Commit ebd4480 · 1 Parent(s): dd4b164

Up-to-date dev version

Files changed (1):
  gradio_demo.py  +44 -51
gradio_demo.py CHANGED
@@ -160,7 +160,6 @@ def stage2_process(
     if 1 < downscale:
         input_height, input_width, input_channel = np.array(input_image).shape
         input_image = input_image.resize((input_width // downscale, input_height // downscale), Image.LANCZOS)
-    torch.cuda.set_device(SUPIR_device)
     event_id = str(time.time_ns())
     event_dict = {'event_id': event_id, 'localtime': time.ctime(), 'prompt': prompt, 'a_prompt': a_prompt,
                   'n_prompt': n_prompt, 'num_samples': num_samples, 'upscale': upscale, 'edm_steps': edm_steps,
@@ -181,24 +180,8 @@ def stage2_process(
     input_image = upscale_image(input_image, upscale, unit_resolution=32,
                                 min_size=min_size)
 
-    LQ = np.array(input_image) / 255.0
-    LQ = np.power(LQ, gamma_correction)
-    LQ *= 255.0
-    LQ = LQ.round().clip(0, 255).astype(np.uint8)
-    LQ = LQ / 255 * 2 - 1
-    LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
-    if use_llava:
-        captions = [prompt]
-    else:
-        captions = ['']
-
-    model.ae_dtype = convert_dtype(ae_dtype)
-    model.model.dtype = convert_dtype(diff_dtype)
-
-    samples = restore(
+    result_slider, result_gallery, restore_information, event_id = restore(
         model,
-        LQ,
-        captions,
         edm_steps,
         s_stage1,
         s_churn,
@@ -216,6 +199,48 @@ def stage2_process(
         spt_linear_s_stage2
     )
 
+    return result_slider, result_gallery, restore_information, event_id
+
+@spaces.GPU(duration=600)
+def restore(
+    model,
+    edm_steps,
+    s_stage1,
+    s_churn,
+    s_noise,
+    s_cfg,
+    s_stage2,
+    seed,
+    num_samples,
+    a_prompt,
+    n_prompt,
+    color_fix_type,
+    linear_CFG,
+    linear_s_stage2,
+    spt_linear_CFG,
+    spt_linear_s_stage2
+):
+    torch.cuda.set_device(SUPIR_device)
+    LQ = np.array(input_image) / 255.0
+    LQ = np.power(LQ, gamma_correction)
+    LQ *= 255.0
+    LQ = LQ.round().clip(0, 255).astype(np.uint8)
+    LQ = LQ / 255 * 2 - 1
+    LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
+    if use_llava:
+        captions = [prompt]
+    else:
+        captions = ['']
+
+    model.ae_dtype = convert_dtype(ae_dtype)
+    model.model.dtype = convert_dtype(diff_dtype)
+
+    samples = model.batchify_sample(LQ, captions, num_steps=edm_steps, restoration_scale=s_stage1, s_churn=s_churn,
+                                    s_noise=s_noise, cfg_scale=s_cfg, control_scale=s_stage2, seed=seed,
+                                    num_samples=num_samples, p_p=a_prompt, n_p=n_prompt, color_fix_type=color_fix_type,
+                                    use_linear_CFG=linear_CFG, use_linear_control_scale=linear_s_stage2,
+                                    cfg_scale_start=spt_linear_CFG, control_scale_start=spt_linear_s_stage2)
+
     x_samples = (einops.rearrange(samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().round().clip(
         0, 255).astype(np.uint8)
     results = [x_samples[i] for i in range(num_samples)]
@@ -252,33 +277,6 @@ def stage2_process(
     # Only one image can be shown in the slider
     return [noisy_image] + [results[0]], gr.update(format = output_format, value = [noisy_image] + results), gr.update(value = information, visible = True), event_id
 
-@spaces.GPU(duration=600)
-def restore(
-    model,
-    LQ,
-    captions,
-    edm_steps,
-    s_stage1,
-    s_churn,
-    s_noise,
-    s_cfg,
-    s_stage2,
-    seed,
-    num_samples,
-    a_prompt,
-    n_prompt,
-    color_fix_type,
-    linear_CFG,
-    linear_s_stage2,
-    spt_linear_CFG,
-    spt_linear_s_stage2
-):
-    return model.batchify_sample(LQ, captions, num_steps=edm_steps, restoration_scale=s_stage1, s_churn=s_churn,
-                                 s_noise=s_noise, cfg_scale=s_cfg, control_scale=s_stage2, seed=seed,
-                                 num_samples=num_samples, p_p=a_prompt, n_p=n_prompt, color_fix_type=color_fix_type,
-                                 use_linear_CFG=linear_CFG, use_linear_control_scale=linear_s_stage2,
-                                 cfg_scale_start=spt_linear_CFG, control_scale_start=spt_linear_s_stage2)
-
 def load_and_reset(param_setting):
     print('load_and_reset ==>>')
     if torch.cuda.device_count() == 0:
@@ -346,15 +344,10 @@ title_html = """
 
 claim_md = """
 ## **Piracy**
-
 The images are not stored but the logs are saved during a month.
-
 ## **Terms of use**
-
 By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research. Please submit a feedback to us if you get any inappropriate answer! We will collect those to keep improving our models. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
-
 ## **License**
-
 The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/Fanghua-Yu/SUPIR) of SUPIR.
 """
 
@@ -552,4 +545,4 @@ with gr.Blocks(title="SUPIR") as interface:
         fb_text
     ])
 
-interface.queue(10).launch()
+interface.queue(10).launch()
 
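The change above follows the usual Hugging Face ZeroGPU pattern: everything that touches CUDA (device selection, building the LQ tensor, the call to model.batchify_sample) is moved inside the function decorated with @spaces.GPU(duration=600), so a GPU only needs to be attached while restore runs, while resizing, logging, and event bookkeeping stay on CPU in stage2_process. A minimal sketch of that pattern follows; it assumes only the spaces, torch, and numpy packages, and the names gpu_restore and cpu_side are illustrative placeholders, not part of gradio_demo.py.

import numpy as np
import spaces
import torch

@spaces.GPU(duration=600)  # GPU is attached only while this call runs
def gpu_restore(image: np.ndarray) -> np.ndarray:
    # Hypothetical stand-in for restore(): move the pixels to CUDA, do the
    # heavy work there, and hand back a plain NumPy array so the caller
    # never needs the device.
    x = torch.from_numpy(image).float().cuda() / 255 * 2 - 1  # [0, 255] -> [-1, 1]
    y = (x + 1) * 127.5                                       # placeholder for model.batchify_sample(...)
    return y.round().clamp(0, 255).byte().cpu().numpy()

def cpu_side(image: np.ndarray) -> np.ndarray:
    # Resizing, logging, and event bookkeeping stay outside the decorated
    # function, as stage2_process does after this commit.
    return gpu_restore(image)

Keeping plain arrays at the boundary of the decorated function is why the commit also moves the LQ tensor construction and the dtype conversions from stage2_process into restore.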