Merge branch 'master' of github.com:AUTOMATIC1111/stable-diffusion-webui into gamepad
commit 5e1f4f7464
@@ -17,7 +17,7 @@ titles = {
     "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
     "\u{1f4c2}": "Open images output directory",
     "\u{1f4be}": "Save style",
-    "\U0001F5D1": "Clear prompt",
+    "\u{1f5d1}": "Clear prompt",
     "\u{1f4cb}": "Apply selected styles to current prompt",
     "\u{1f4d2}": "Paste available values into the field",
     "\u{1f3b4}": "Show extra networks",
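Note: the "\U0001F5D1" to "\u{1f5d1}" change is a real fix, not a style tweak. JavaScript has no \U escape, so "\U0001F5D1" parses as the literal text U0001F5D1 and the hint never attaches to the 🗑️ button, whereas "\u{1f5d1}" is the ES2015 code-point escape for U+1F5D1 and matches the \u{...} style of the surrounding keys.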
@@ -2,6 +2,7 @@ import sys, os, shlex
 import contextlib
 import torch
 from modules import errors
+from modules.sd_hijack_utils import CondFunc
 from packaging import version
@@ -156,36 +157,7 @@ def test_for_nans(x, where):
     raise NansException(message)


-# MPS workaround for https://github.com/pytorch/pytorch/issues/79383
-orig_tensor_to = torch.Tensor.to
-def tensor_to_fix(self, *args, **kwargs):
-    if self.device.type != 'mps' and \
-       ((len(args) > 0 and isinstance(args[0], torch.device) and args[0].type == 'mps') or \
-       (isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps')):
-        self = self.contiguous()
-    return orig_tensor_to(self, *args, **kwargs)
-
-
-# MPS workaround for https://github.com/pytorch/pytorch/issues/80800
-orig_layer_norm = torch.nn.functional.layer_norm
-def layer_norm_fix(*args, **kwargs):
-    if len(args) > 0 and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps':
-        args = list(args)
-        args[0] = args[0].contiguous()
-    return orig_layer_norm(*args, **kwargs)
-
-
-# MPS workaround for https://github.com/pytorch/pytorch/issues/90532
-orig_tensor_numpy = torch.Tensor.numpy
-def numpy_fix(self, *args, **kwargs):
-    if self.requires_grad:
-        self = self.detach()
-    return orig_tensor_numpy(self, *args, **kwargs)
-
-
 # MPS workaround for https://github.com/pytorch/pytorch/issues/89784
-orig_cumsum = torch.cumsum
-orig_Tensor_cumsum = torch.Tensor.cumsum
 def cumsum_fix(input, cumsum_func, *args, **kwargs):
     if input.device.type == 'mps':
         output_dtype = kwargs.get('dtype', input.dtype)
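Note: every workaround removed above follows the same manual monkey-patching pattern: save a reference to the original function, define a named wrapper that applies the fix and delegates, then reassign the attribute. The shortest removed instance, numpy_fix, shows the whole pattern (this sketch reuses the removed code verbatim):

    import torch

    # Keep a reference to the original implementation so the wrapper can delegate to it.
    orig_tensor_numpy = torch.Tensor.numpy

    def numpy_fix(self, *args, **kwargs):
        # .numpy() refuses tensors that require grad; detach first, then delegate.
        if self.requires_grad:
            self = self.detach()
        return orig_tensor_numpy(self, *args, **kwargs)

    torch.Tensor.numpy = numpy_fix  # every Tensor.numpy() call now routes through the wrapper

The cost of this pattern is one saved original plus one named wrapper per patch; the CondFunc helper introduced in the next hunk factors that boilerplate out.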
@@ -199,11 +171,20 @@ def cumsum_fix(input, cumsum_func, *args, **kwargs):
 if has_mps():
     if version.parse(torch.__version__) < version.parse("1.13"):
         # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
-        torch.Tensor.to = tensor_to_fix
-        torch.nn.functional.layer_norm = layer_norm_fix
-        torch.Tensor.numpy = numpy_fix
+
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/79383
+        CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs),
+            lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps'))
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/80800
+        CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs),
+            lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps')
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/90532
+        CondFunc('torch.Tensor.numpy', lambda orig_func, self, *args, **kwargs: orig_func(self.detach(), *args, **kwargs), lambda _, self, *args, **kwargs: self.requires_grad)
     elif version.parse(torch.__version__) > version.parse("1.13.1"):
         cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
         cumsum_needs_bool_fix = not torch.BoolTensor([True,True]).to(device=torch.device("mps"), dtype=torch.int64).equal(torch.BoolTensor([True,False]).to(torch.device("mps")).cumsum(0))
-        torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) )
-        torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) )
+        cumsum_fix_func = lambda orig_func, input, *args, **kwargs: cumsum_fix(input, orig_func, *args, **kwargs)
+        CondFunc('torch.cumsum', cumsum_fix_func, None)
+        CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None)
+        CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None)
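Note: the CondFunc calls above compress that pattern into one line per patch: CondFunc(target, sub_func, cond_func) reroutes target so that sub_func(orig_func, ...) runs whenever cond_func(orig_func, ...) is truthy, with cond_func=None meaning "always substitute". The real helper lives in modules/sd_hijack_utils.py and resolves the target from its dotted-path string; below is a minimal sketch of the same semantics, assuming a simplified signature that takes the owner object directly (cond_func, owner, and name here are illustrative, not webui API):

    def cond_func(owner, name, sub_func, cond):
        # Conditionally reroute owner.<name> through sub_func (illustrative sketch).
        orig_func = getattr(owner, name)

        def wrapper(*args, **kwargs):
            # cond=None means "always substitute", matching the CondFunc(..., None) calls above.
            if cond is None or cond(orig_func, *args, **kwargs):
                return sub_func(orig_func, *args, **kwargs)
            return orig_func(*args, **kwargs)

        setattr(owner, name, wrapper)

    # Usage mirroring the torch.Tensor.numpy call site above:
    # cond_func(torch.Tensor, 'numpy',
    #           lambda orig, self, *a, **kw: orig(self.detach(), *a, **kw),
    #           lambda _, self, *a, **kw: self.requires_grad)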
@@ -327,7 +327,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
     "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
     "export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),

-    "use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
+    "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
     "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
     "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
     "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"),
@@ -479,8 +479,8 @@ def create_ui():
                         width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
                         height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")

+                    res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn")
                     if opts.dimensions_and_batch_together:
-                        res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn")
                         with gr.Column(elem_id="txt2img_column_batch"):
                             batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
                             batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
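Note: moving res_switch_btn above the conditional means the button exists, and any handler wired to it later in create_ui() can reference it, even when dimensions_and_batch_together is off; under the old placement the name was never bound in that case. A self-contained illustration of the hazard (not webui code):

    dimensions_and_batch_together = False  # stand-in for opts.dimensions_and_batch_together

    if dimensions_and_batch_together:
        res_switch_btn = "ToolButton"      # old placement: bound only inside the branch

    try:
        res_switch_btn                     # later wiring, e.g. res_switch_btn.click(...)
    except NameError:
        print("res_switch_btn was never created")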
@@ -757,8 +757,8 @@ def create_ui():
                         width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
                         height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")

+                    res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
                     if opts.dimensions_and_batch_together:
-                        res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
                         with gr.Column(elem_id="img2img_column_batch"):
                             batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
                             batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
@@ -44,16 +44,40 @@ class Script(scripts.Script):
     def title(self):
         return "Prompt matrix"

     def ui(self, is_img2img):
-        put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start"))
-        different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
-
-        return [put_at_start, different_seeds]
-
-    def run(self, p, put_at_start, different_seeds):
+        gr.HTML('<br />')
+        with gr.Row():
+            with gr.Column():
+                put_at_start = gr.Checkbox(label='Put variable parts at start of prompt',
+                                           value=False, elem_id=self.elem_id("put_at_start"))
+            with gr.Column():
+                # Radio buttons for selecting the prompt between positive and negative
+                prompt_type = gr.Radio(["positive", "negative"], label="Select prompt",
+                                       elem_id=self.elem_id("prompt_type"), value="positive")
+        with gr.Row():
+            with gr.Column():
+                different_seeds = gr.Checkbox(
+                    label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
+            with gr.Column():
+                # Radio buttons for selecting the delimiter to use in the resulting prompt
+                variations_delimiter = gr.Radio(["comma", "space"], label="Select delimiter", elem_id=self.elem_id(
+                    "variations_delimiter"), value="comma")
+
+        return [put_at_start, different_seeds, prompt_type, variations_delimiter]
+
+    def run(self, p, put_at_start, different_seeds, prompt_type, variations_delimiter):
         modules.processing.fix_seed(p)
+        # Raise error if prompt type is not positive or negative
+        if prompt_type not in ["positive", "negative"]:
+            raise ValueError(f"Unknown prompt type {prompt_type}")
+        # Raise error if variations delimiter is not comma or space
+        if variations_delimiter not in ["comma", "space"]:
+            raise ValueError(f"Unknown variations delimiter {variations_delimiter}")

-        original_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
+        prompt = p.prompt if prompt_type == "positive" else p.negative_prompt
+        original_prompt = prompt[0] if type(prompt) == list else prompt
+        positive_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
+
+        delimiter = ", " if variations_delimiter == "comma" else " "

         all_prompts = []
         prompt_matrix_parts = original_prompt.split("|")
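Note: the ui() and run() signatures change in lockstep because, in the webui scripts API, the component values returned by ui() are handed to run() as positional arguments after p. A schematic, self-contained sketch of that contract (the actual dispatch lives in modules/scripts.py):

    class Script:
        def ui(self, is_img2img):
            # Stand-in values for the Gradio components created above.
            put_at_start, different_seeds = False, False
            prompt_type, variations_delimiter = "positive", "comma"
            return [put_at_start, different_seeds, prompt_type, variations_delimiter]

        def run(self, p, put_at_start, different_seeds, prompt_type, variations_delimiter):
            # Arguments arrive in the same order ui() returned them.
            return prompt_type, variations_delimiter

    print(Script().run(None, *Script().ui(is_img2img=False)))  # ('positive', 'comma')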
@@ -66,16 +90,19 @@ class Script(scripts.Script):
             else:
                 selected_prompts = [prompt_matrix_parts[0]] + selected_prompts

-            all_prompts.append(", ".join(selected_prompts))
+            all_prompts.append(delimiter.join(selected_prompts))

         p.n_iter = math.ceil(len(all_prompts) / p.batch_size)
         p.do_not_save_grid = True

         print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.")

-        p.prompt = all_prompts
+        if prompt_type == "positive":
+            p.prompt = all_prompts
+        else:
+            p.negative_prompt = all_prompts
         p.seed = [p.seed + (i if different_seeds else 0) for i in range(len(all_prompts))]
-        p.prompt_for_display = original_prompt
+        p.prompt_for_display = positive_prompt
         processed = process_images(p)

         grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
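Note: as a worked example of the expansion this loop performs: the first |-separated part of the prompt is always kept and each remaining part is toggled on or off, so n parts yield 2**(n-1) prompts, now joined with the selected delimiter. A self-contained sketch, assuming the bitmask scheme this script uses to enumerate combinations:

    original_prompt = "a cat|wearing a hat|on a boat"
    delimiter = ", "  # or " " when variations_delimiter == "space"

    parts = original_prompt.split("|")
    all_prompts = []
    for mask in range(2 ** (len(parts) - 1)):
        # Bit n of the mask decides whether optional part n+1 is included.
        selected = [parts[0]] + [p.strip() for n, p in enumerate(parts[1:]) if mask & (1 << n)]
        all_prompts.append(delimiter.join(selected))

    print(all_prompts)
    # ['a cat', 'a cat, wearing a hat', 'a cat, on a boat', 'a cat, wearing a hat, on a boat']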
@@ -286,23 +286,24 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
         print("Unexpected error: draw_xyz_grid failed to return even a single processed image")
         return Processed(p, [])

-    grids = [None] * len(zs)
+    sub_grids = [None] * len(zs)
     for i in range(len(zs)):
         start_index = i * len(xs) * len(ys)
         end_index = start_index + len(xs) * len(ys)
         grid = images.image_grid(image_cache[start_index:end_index], rows=len(ys))
         if draw_legend:
             grid = images.draw_grid_annotations(grid, cell_size[0], cell_size[1], hor_texts, ver_texts)
-        grids[i] = grid
+        sub_grids[i] = grid
         if include_sub_grids and len(zs) > 1:
             processed_result.images.insert(i+1, grid)

-    original_grid_size = grids[0].size
-    grids = images.image_grid(grids, rows=1)
-    processed_result.images[0] = images.draw_grid_annotations(grids, original_grid_size[0], original_grid_size[1], title_texts, [[images.GridAnnotation()]])
+    sub_grid_size = sub_grids[0].size
+    z_grid = images.image_grid(sub_grids, rows=1)
+    if draw_legend:
+        z_grid = images.draw_grid_annotations(z_grid, sub_grid_size[0], sub_grid_size[1], title_texts, [[images.GridAnnotation()]])
+    processed_result.images[0] = z_grid

-    return processed_result
+    return processed_result, sub_grids


 class SharedSettingsStackHelper(object):
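Note: the grids-to-sub_grids renaming makes the layout easier to follow: image_cache holds len(xs) * len(ys) cells per z value, so slice i of the flat cache is exactly one sub-grid, and the z_grid is the row of all sub-grids. A small self-contained check of that index arithmetic:

    nx, ny, nz = 3, 2, 4                      # len(xs), len(ys), len(zs)
    image_cache = list(range(nx * ny * nz))   # stand-ins for the rendered cells

    sub_grids = []
    for i in range(nz):
        start_index = i * nx * ny             # first cell of z-slice i
        end_index = start_index + nx * ny     # one past its last cell
        sub_grids.append(image_cache[start_index:end_index])

    assert len(sub_grids) == nz
    assert all(len(g) == nx * ny for g in sub_grids)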
@@ -576,7 +577,7 @@ class Script(scripts.Script):
             return res

         with SharedSettingsStackHelper():
-            processed = draw_xyz_grid(
+            processed, sub_grids = draw_xyz_grid(
                 p,
                 xs=xs,
                 ys=ys,
@@ -592,6 +593,10 @@ class Script(scripts.Script):
                 second_axes_processed=second_axes_processed
             )

+        if opts.grid_save and len(sub_grids) > 1:
+            for sub_grid in sub_grids:
+                images.save_image(sub_grid, p.outpath_grids, "xyz_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p)
+
         if opts.grid_save:
             images.save_image(processed.images[0], p.outpath_grids, "xyz_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p)