sd_samplers_common.py

from collections import namedtuple
import numpy as np
import torch
from PIL import Image
from modules import devices, processing, images, sd_vae_approx, sd_samplers, sd_vae_taesd
from modules.shared import opts, state
import modules.shared as shared

SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
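
# Illustrative only: a sampler registry built elsewhere might hold entries such
# as SamplerData('Euler a', <constructor>, ['k_euler_a'], {"uses_ensd": True});
# the constructor and the exact options shown here are assumed for the sketch.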


def setup_img2img_steps(p, steps=None):
    if opts.img2img_fix_steps or steps is not None:
        # With fixed steps, scale the step count up so that roughly the
        # requested number of steps still runs after the skipped portion.
        requested_steps = (steps or p.steps)
        steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
        t_enc = requested_steps - 1
    else:
        steps = p.steps
        t_enc = int(min(p.denoising_strength, 0.999) * steps)

    return steps, t_enc
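
# Worked example (values assumed for illustration): with p.steps = 20 and
# p.denoising_strength = 0.75, the fixed-steps branch yields
# steps = int(20 / 0.75) = 26 and t_enc = 19, so roughly the requested 20
# denoising steps still run after the skipped portion; the default branch
# instead keeps steps = 20 and runs only t_enc = int(0.75 * 20) = 15 of them.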


approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}


def single_sample_to_image(sample, approximation=None):
    if approximation is None:
        approximation = approximation_indexes.get(opts.show_progress_type, 0)

    if approximation == 2:  # cheap linear approximation of the VAE decoder
        x_sample = sd_vae_approx.cheap_approximation(sample) * 0.5 + 0.5
    elif approximation == 1:  # small neural-network approximation
        x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach() * 0.5 + 0.5
    elif approximation == 3:  # TAESD tiny autoencoder (latent scaled up before decode)
        x_sample = sample * 1.5
        x_sample = sd_vae_taesd.model()(x_sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
    else:  # full VAE decode
        x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0] * 0.5 + 0.5

    x_sample = torch.clamp(x_sample, min=0.0, max=1.0)
    x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
    x_sample = x_sample.astype(np.uint8)

    return Image.fromarray(x_sample)


def sample_to_image(samples, index=0, approximation=None):
    return single_sample_to_image(samples[index], approximation)


def samples_to_image_grid(samples, approximation=None):
    return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
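
# Usage sketch (shapes assumed; for Stable Diffusion 1.x-style latents of
# shape [N, 4, H/8, W/8]):
#   preview = sample_to_image(samples, index=0, approximation=2)  # cheap approx
#   grid = samples_to_image_grid(samples)  # one decoded image per latent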


def store_latent(decoded):
    state.current_latent = decoded

    if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
        if not shared.parallel_processing_allowed:
            shared.state.assign_current_image(sample_to_image(decoded))
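
# Note: assuming e.g. show_progress_every_n_steps = 5, the branch above
# refreshes the live preview at sampling steps 0, 5, 10, ... (and only when
# parallel processing is disallowed); the latent is stored on every call.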


def is_sampler_using_eta_noise_seed_delta(p):
    """Returns whether the configured sampler will use eta noise seed delta (ENSD) for image creation."""

    sampler_config = sd_samplers.find_sampler_config(p.sampler_name)

    eta = p.eta

    if eta is None and p.sampler is not None:
        eta = p.sampler.eta

    if eta is None and sampler_config is not None:
        eta = 0 if sampler_config.options.get("default_eta_is_0", False) else 1.0

    if eta == 0:
        return False

    return sampler_config.options.get("uses_ensd", False)
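
# Example resolution (hypothetical sampler config): for a sampler registered
# with options={"uses_ensd": True} and no eta set on p or the sampler, eta
# falls back to 1.0 above, so the function returns True and ENSD is applied;
# the same sampler with an explicit eta of 0 returns False.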


class InterruptedException(BaseException):
    pass


if opts.randn_source == "CPU":
    import torchsde._brownian.brownian_interval

    def torchsde_randn(size, dtype, device, seed):
        # Draw the noise on the CPU with a seeded generator, then move it to
        # the target device, so results are reproducible regardless of GPU.
        generator = torch.Generator(devices.cpu).manual_seed(int(seed))
        return torch.randn(size, dtype=dtype, device=devices.cpu, generator=generator).to(device)

    # Monkey-patch torchsde's internal noise source with the CPU version.
    torchsde._brownian.brownian_interval._randn = torchsde_randn
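
# Illustration (not part of the module): with the patch active, noise drawn for
# a CUDA tensor matches noise drawn on the CPU for the same seed, e.g.
#   a = torchsde_randn((2, 3), torch.float32, torch.device("cuda"), seed=1)
#   b = torchsde_randn((2, 3), torch.float32, torch.device("cpu"), seed=1)
# here a.cpu() equals b, since both draw from the same seeded CPU generator.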