diff --git a/modules/processing.py b/modules/processing.py
index 3d2c4dc9..ab5a34d0 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -795,7 +795,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         for img in self.init_images:
             image = img.convert("RGB")
 
-            if crop_region is None:
+            if crop_region is None and self.resize_mode != 3:
                 image = images.resize_image(self.resize_mode, image, self.width, self.height)
 
             if image_mask is not None:
@@ -804,6 +804,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
 
                 self.overlay_images.append(image_masked.convert('RGBA'))
 
+            # crop_region is not None iff we are doing inpaint full res
             if crop_region is not None:
                 image = image.crop(crop_region)
                 image = images.resize_image(2, image, self.width, self.height)
@@ -840,6 +841,9 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
 
         self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
 
+        if self.resize_mode == 3:
+            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
+
         if image_mask is not None:
             init_mask = latent_mask
             latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
diff --git a/modules/ui.py b/modules/ui.py
index b2b8de90..fe4abe05 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -829,7 +829,7 @@ def create_ui():
                     img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
 
             with gr.Row():
-                resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
+                resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Upscale Latent Space"], type="index", value="Just resize")
 
             steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
             sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
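
The new "Upscale Latent Space" resize mode (index 3) skips the pixel-space resize and instead bilinearly interpolates the encoded latent to the requested size divided by the VAE downscale factor. Below is a minimal standalone sketch of that interpolation step; it assumes `opt_f` is the usual factor of 8 and uses made-up tensor sizes, so it illustrates the shape arithmetic rather than reproducing the patched code path.

```python
# Sketch of the latent upscaling added for resize mode 3 (assumed opt_f = 8,
# example sizes are illustrative and not taken from the patch).
import torch
import torch.nn.functional as F

opt_f = 8  # latent downscale factor: latent H/W are image H/W divided by this

# Fake latent for a 512x512 source image: (batch, channels, H // opt_f, W // opt_f)
init_latent = torch.randn(1, 4, 512 // opt_f, 512 // opt_f)

# Target generation size requested in the UI (height, width)
height, width = 768, 1024

# Bilinear interpolation in latent space, mirroring the interpolate call in processing.py
upscaled = F.interpolate(init_latent, size=(height // opt_f, width // opt_f), mode="bilinear")

print(init_latent.shape)  # torch.Size([1, 4, 64, 64])
print(upscaled.shape)     # torch.Size([1, 4, 96, 128])
```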