From 040ec7a80e23d340efe1108b9de5ead62d9011a9 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 30 Jan 2023 10:47:09 +0300
Subject: [PATCH] make the program read Eta and Eta DDIM from generation parameters

---
 modules/generation_parameters_copypaste.py | 2 ++
 modules/processing.py                      | 1 -
 modules/sd_samplers_compvis.py             | 3 ++-
 modules/sd_samplers_kdiffusion.py          | 8 +++++---
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 2a10524f..7ee8ee10 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -293,6 +293,8 @@ infotext_to_setting_name_mapping = [
     ('Model hash', 'sd_model_checkpoint'),
     ('ENSD', 'eta_noise_seed_delta'),
     ('Noise multiplier', 'initial_noise_multiplier'),
+    ('Eta', 'eta_ancestral'),
+    ('Eta DDIM', 'eta_ddim'),
 ]


diff --git a/modules/processing.py b/modules/processing.py
index 2d295932..e544c2e1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -455,7 +455,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
         "Denoising strength": getattr(p, 'denoising_strength', None),
         "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
-        "Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
         "Clip skip": None if clip_skip <= 1 else clip_skip,
         "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
     }
diff --git a/modules/sd_samplers_compvis.py b/modules/sd_samplers_compvis.py
index 88541193..d03131cd 100644
--- a/modules/sd_samplers_compvis.py
+++ b/modules/sd_samplers_compvis.py
@@ -27,7 +27,6 @@ class VanillaStableDiffusionSampler:
         self.step = 0
         self.stop_at = None
         self.eta = None
-        self.default_eta = 0.0
         self.config = None
         self.last_latent = None

@@ -102,6 +101,8 @@ class VanillaStableDiffusionSampler:

     def initialize(self, p):
         self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim
+        if self.eta != 0.0:
+            p.extra_generation_params["Eta DDIM"] = self.eta

         for fieldname in ['p_sample_ddim', 'p_sample_plms']:
             if hasattr(self.sampler, fieldname):
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index adb6883e..aa7f106b 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -2,7 +2,7 @@ from collections import deque
 import torch
 import inspect
 import k_diffusion.sampling
-from modules import prompt_parser, devices, sd_samplers_common, sd_samplers_compvis
+from modules import prompt_parser, devices, sd_samplers_common
 from modules.shared import opts, state
 import modules.shared as shared

@@ -164,7 +164,6 @@ class KDiffusionSampler:
         self.sampler_noises = None
         self.stop_at = None
         self.eta = None
-        self.default_eta = 1.0
         self.config = None
         self.last_latent = None

@@ -199,7 +198,7 @@ class KDiffusionSampler:
         self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
         self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
         self.model_wrap_cfg.step = 0
-        self.eta = p.eta or opts.eta_ancestral
+        self.eta = p.eta if p.eta is not None else opts.eta_ancestral

         k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])

@@ -209,6 +208,9 @@ class KDiffusionSampler:
                 extra_params_kwargs[param_name] = getattr(p, param_name)

         if 'eta' in inspect.signature(self.func).parameters:
+            if self.eta != 1.0:
+                p.extra_generation_params["Eta"] = self.eta
+
             extra_params_kwargs['eta'] = self.eta

         return extra_params_kwargs
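
Reviewer note (not part of the patch): before this change, create_infotext() emitted a single "Eta" field whenever the sampler's eta differed from its default_eta; after it, each sampler records its own key in p.extra_generation_params ("Eta" from KDiffusionSampler.initialize when eta != 1.0, "Eta DDIM" from VanillaStableDiffusionSampler.initialize when eta != 0.0), and the two new rows in infotext_to_setting_name_mapping let pasted parameters route those fields back to the eta_ancestral and eta_ddim settings. Below is a minimal, self-contained sketch of that read-back direction; settings_from_infotext() and the example string are simplified, hypothetical stand-ins for the webui's actual parser (parse_generation_parameters), which also handles prompt lines and quoted values.

# Minimal sketch, not the webui's real parser: it only illustrates how the two
# new mapping rows route pasted infotext fields back to settings.

# Same label -> setting pairs that this patch appends to
# infotext_to_setting_name_mapping (plus one pre-existing row for context).
infotext_to_setting_name_mapping = [
    ('ENSD', 'eta_noise_seed_delta'),
    ('Eta', 'eta_ancestral'),     # written by KDiffusionSampler.initialize when eta != 1.0
    ('Eta DDIM', 'eta_ddim'),     # written by VanillaStableDiffusionSampler.initialize when eta != 0.0
]


def settings_from_infotext(last_line):
    """Hypothetical helper: split a 'Key: value, Key: value' line and keep
    only the fields that have a settings mapping."""
    fields = {}
    for part in last_line.split(', '):
        key, _, value = part.partition(': ')
        fields[key.strip()] = value.strip()
    return {setting: fields[label]
            for label, setting in infotext_to_setting_name_mapping
            if label in fields}


# Example tail of an infotext as create_infotext() could emit it after this
# change (values are made up).
example = "Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 1, Eta: 0.67, ENSD: 31337"
print(settings_from_infotext(example))
# -> {'eta_noise_seed_delta': '31337', 'eta_ancestral': '0.67'}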