Add upscaler to img2img

space-nuko 2023-02-19 03:45:43 -08:00
parent 68999d0b15
commit 7ea5d395c4
6 changed files with 30 additions and 13 deletions

View File

@@ -282,8 +282,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
        res["Hires resize-1"] = 0
        res["Hires resize-2"] = 0

-    if "Img2Img Upscale" not in res:
-        res["Img2Img Upscale"] = 1
+    if "Img2Img upscale" not in res:
+        res["Img2Img upscale"] = 1

    restore_old_hires_fix_params(res)
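
The parser change above renames the infotext key to "Img2Img upscale" and backfills a default of 1, so infotexts written before this commit still restore without errors. A minimal standalone sketch of that backfill, using a made-up `res` dict:

# Hypothetical parsed infotext that predates the "Img2Img upscale" key
res = {"Steps": "20", "Sampler": "Euler a", "CFG scale": "7", "Seed": "965400086"}

if "Img2Img upscale" not in res:
    res["Img2Img upscale"] = 1  # 1 means "no upscaling", matching the behaviour of older images

print(res["Img2Img upscale"])  # 1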

View File

@@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
            processed_image.save(os.path.join(output_dir, filename))

-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, scale: float, upscaler: str, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
    override_settings = create_override_settings_dict(override_settings_texts)

    is_batch = mode == 5
@@ -150,6 +150,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
        inpainting_mask_invert=inpainting_mask_invert,
        override_settings=override_settings,
        scale=scale,
+        upscaler=upscaler,
    )

    p.scripts = modules.scripts.scripts_txt2img

View File

@@ -929,7 +929,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
    sampler = None

-    def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, **kwargs):
+    def __init__(self, init_images: Optional[list] = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: Optional[float] = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: Optional[float] = None, scale: float = 0, upscaler: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.init_images = init_images
@@ -950,6 +950,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
        self.nmask = None
        self.image_conditioning = None
        self.scale = scale
+        self.upscaler = upscaler

    def get_final_size(self):
        if self.scale > 1:
@@ -966,7 +967,16 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
        crop_region = None

        if self.scale > 1:
-            self.extra_generation_params["Img2Img Upscale"] = self.scale
+            self.extra_generation_params["Img2Img upscale"] = self.scale
+
+        # Non-latent upscalers are run before sampling
+        # Latent upscalers are run during sampling
+        init_upscaler = None
+        if self.upscaler is not None:
+            self.extra_generation_params["Img2Img upscaler"] = self.upscaler
+            if self.upscaler not in shared.latent_upscale_modes:
+                assert len([x for x in shared.sd_upscalers if x.name == self.upscaler]) > 0, f"could not find upscaler named {self.upscaler}"
+                init_upscaler = self.upscaler

        self.width, self.height = self.get_final_size()
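
The block above splits the selected upscaler into two groups: a name found in shared.latent_upscale_modes is a latent mode handled later during sampling, while anything else must match an entry in shared.sd_upscalers and is applied to the source image before encoding (via init_upscaler). A standalone sketch of that selection logic, with stand-in data in place of the real shared module:

# Stand-ins for shared.latent_upscale_modes / shared.sd_upscalers (real contents vary by install)
latent_upscale_modes = {"Latent": {"mode": "bilinear", "antialias": False}}
sd_upscaler_names = ["None", "Lanczos", "Nearest", "ESRGAN_4x"]

def pick_init_upscaler(upscaler):
    """Return the name of an image-space upscaler to run before encoding, or None."""
    init_upscaler = None
    if upscaler is not None:
        if upscaler not in latent_upscale_modes:
            # not a latent mode, so it has to be one of the regular upscalers
            assert upscaler in sd_upscaler_names, f"could not find upscaler named {upscaler}"
            init_upscaler = upscaler
    return init_upscaler

print(pick_init_upscaler("ESRGAN_4x"))  # ESRGAN_4x -> resize the image before VAE encoding
print(pick_init_upscaler("Latent"))     # None      -> handled later by latent interpolation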
@@ -992,7 +1002,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
                image_mask = images.resize_image(2, mask, self.width, self.height)
                self.paste_to = (x1, y1, x2-x1, y2-y1)
            else:
-                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
+                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height, init_upscaler)
                np_mask = np.array(image_mask)
                np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
                self.mask_for_overlay = Image.fromarray(np_mask)
@@ -1009,7 +1019,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
            image = images.flatten(img, opts.img2img_background_color)

            if crop_region is None and self.resize_mode != 3:
-                image = images.resize_image(self.resize_mode, image, self.width, self.height)
+                image = images.resize_image(self.resize_mode, image, self.width, self.height, init_upscaler)

            if image_mask is not None:
                image_masked = Image.new('RGBa', (image.width, image.height))
@@ -1054,8 +1064,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
        self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))

-        if self.resize_mode == 3:
-            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+        latent_scale_mode = shared.latent_upscale_modes.get(self.upscaler, None) if self.upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
+        if latent_scale_mode is not None:
+            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])

        if image_mask is not None:
            init_mask = latent_mask
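
When a latent mode is selected (or no upscaler is given and the default mode is latent), the encoded latent itself is resized with torch.nn.functional.interpolate rather than resizing the source image. A standalone sketch of that resize, assuming the codebase's VAE downscale factor of 8 and a made-up target size:

import torch
import torch.nn.functional as F

# Stand-in for one entry of shared.latent_upscale_modes
latent_scale_mode = {"mode": "bilinear", "antialias": False}

opt_f = 8                                                    # VAE downscale factor
init_latent = torch.randn(1, 4, 512 // opt_f, 512 // opt_f)  # latent of a 512x512 init image
height, width = 768, 768                                     # target size after a 1.5x img2img upscale

init_latent = F.interpolate(
    init_latent,
    size=(height // opt_f, width // opt_f),
    mode=latent_scale_mode["mode"],
    antialias=latent_scale_mode["antialias"],
)
print(init_latent.shape)  # torch.Size([1, 4, 96, 96])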

View File

@@ -767,7 +767,7 @@ def create_ui():
                    )

                    with FormRow():
-                        resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
+                        resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")

                    for category in ordered_ui_categories():
                        if category == "sampler":
@@ -797,7 +797,9 @@ def create_ui():
                            with FormRow():
                                cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
                                image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
-                                denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
+                            with FormRow():
+                                upscaler = gr.Dropdown(label="Upscaler", elem_id="img2img_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
+                                denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")

                        elif category == "seed":
                            seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
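
The new Upscaler dropdown offers both groups in a single list: unpacking shared.latent_upscale_modes (a dict) contributes its keys, followed by the names of the regular upscalers. A standalone sketch of how those choices are assembled, with stand-in values:

# Stand-ins for shared.latent_upscale_modes and shared.sd_upscalers (actual contents vary by install)
latent_upscale_modes = {"Latent": {"mode": "bilinear", "antialias": False},
                        "Latent (nearest)": {"mode": "nearest", "antialias": False}}

class UpscalerData:  # minimal stand-in exposing just the attribute the UI reads
    def __init__(self, name):
        self.name = name

sd_upscalers = [UpscalerData("None"), UpscalerData("Lanczos"), UpscalerData("ESRGAN_4x")]

# Unpacking a dict inside a list literal yields its keys, so latent modes come first
choices = [*latent_upscale_modes, *[x.name for x in sd_upscalers]]
print(choices)  # ['Latent', 'Latent (nearest)', 'None', 'Lanczos', 'ESRGAN_4x']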
@@ -934,6 +936,7 @@ def create_ui():
                    height,
                    width,
                    scale,
+                    upscaler,
                    resize_mode,
                    inpaint_full_res,
                    inpaint_full_res_padding,
@@ -1019,7 +1022,8 @@ def create_ui():
        (seed, "Seed"),
        (width, "Size-1"),
        (height, "Size-2"),
-        (scale, "Img2Img Upscale"),
+        (scale, "Img2Img upscale"),
+        (upscaler, "Img2Img upscaler"),
        (batch_size, "Batch size"),
        (subseed, "Variation seed"),
        (subseed_strength, "Variation seed strength"),

View File

@@ -220,6 +220,7 @@ axis_options = [
    AxisOption("Clip skip", int, apply_clip_skip),
    AxisOption("Denoising", float, apply_field("denoising_strength")),
    AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
+    AxisOptionImg2Img("Upscaler", str, apply_field("upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
    AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
    AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)),
    AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),

View File

@@ -779,7 +779,7 @@ footer {
#img2img_finalres{
    min-height: 0 !important;
    padding: .625rem .75rem;
-    margin-left: -0.75em
+    margin-left: 0.25em
}

#img2img_finalres .resolution{