From 8c6a981d5d9ef30381ac2327460285111550acbc Mon Sep 17 00:00:00 2001 From: Michoko Date: Mon, 17 Oct 2022 11:05:05 +0200 Subject: [PATCH 01/30] Added dark mode switch Launch the UI in dark mode with the --dark-mode switch --- javascript/ui.js | 7 +++++++ modules/shared.py | 2 +- modules/ui.py | 2 ++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/javascript/ui.js b/javascript/ui.js index 9e1bed4c..bfa72885 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -1,5 +1,12 @@ // various functions for interation with ui.py not large enough to warrant putting them in separate files +function go_dark_mode(){ + gradioURL = window.location.href + if (!gradioURL.endsWith('?__theme=dark')) { + window.location.replace(gradioURL + '?__theme=dark'); + } +} + function selected_gallery_index(){ var buttons = gradioApp().querySelectorAll('[style="display: block;"].tabitem .gallery-item') var button = gradioApp().querySelector('[style="display: block;"].tabitem .gallery-item.\\!ring-2') diff --git a/modules/shared.py b/modules/shared.py index c2775603..cbf158e4 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -69,13 +69,13 @@ parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image upload parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv')) parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False) +parser.add_argument("--dark-mode", action='store_true', help="launches the UI in dark mode", default=False) parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False) parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False) parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False) parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None) parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) - cmd_opts = parser.parse_args() restricted_opts = [ "samples_filename_pattern", diff --git a/modules/ui.py b/modules/ui.py index 43dc88fc..a0cd052e 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1783,6 +1783,8 @@ for filename in sorted(os.listdir(jsdir)): with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile: javascript += f"\n" +if cmd_opts.dark_mode: + javascript += "\n\n" if 'gradio_routes_templates_response' not in globals(): def template_response(*args, **kwargs): From 665beebc0825a6fad410c8252f27f6f6f0bd900b Mon Sep 17 00:00:00 2001 From: Michoko Date: Mon, 17 Oct 2022 18:24:24 +0200 Subject: [PATCH 02/30] Use of a --theme argument for more flexibility Added possibility to set the theme (light or dark) --- javascript/ui.js | 6 +++--- modules/shared.py | 2 +- modules/ui.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/javascript/ui.js b/javascript/ui.js index bfa72885..cfd0dcd3 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -1,9 +1,9 @@ // various functions for interation with ui.py not large enough to warrant putting them in 
separate files -function go_dark_mode(){ +function set_theme(theme){ gradioURL = window.location.href - if (!gradioURL.endsWith('?__theme=dark')) { - window.location.replace(gradioURL + '?__theme=dark'); + if (!gradioURL.includes('?__theme=')) { + window.location.replace(gradioURL + '?__theme=' + theme); } } diff --git a/modules/shared.py b/modules/shared.py index cbf158e4..fa084c69 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -69,7 +69,7 @@ parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image upload parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv')) parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False) -parser.add_argument("--dark-mode", action='store_true', help="launches the UI in dark mode", default=False) +parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None) parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False) parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False) parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False) diff --git a/modules/ui.py b/modules/ui.py index a0cd052e..d41715fa 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1783,8 +1783,8 @@ for filename in sorted(os.listdir(jsdir)): with open(os.path.join(jsdir, filename), "r", encoding="utf8") as jsfile: javascript += f"\n" -if cmd_opts.dark_mode: - javascript += "\n\n" +if cmd_opts.theme is not None: + javascript += f"\n\n" if 'gradio_routes_templates_response' not in globals(): def template_response(*args, **kwargs): From d62ef76614624cda99d842a2900242d5b7923eda Mon Sep 17 00:00:00 2001 From: guaneec Date: Tue, 18 Oct 2022 03:09:50 +0800 Subject: [PATCH 03/30] Don't eat colons in booru tags --- modules/deepbooru.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 4ad334a1..de16b13f 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -157,8 +157,6 @@ def get_deepbooru_tags_from_model(model, tags, pil_image, threshold, deepbooru_o # sort by reverse by likelihood and normal for alpha, and format tag text as requested unsorted_tags_in_theshold.sort(key=lambda y: y[sort_ndx], reverse=(not alpha_sort)) for weight, tag in unsorted_tags_in_theshold: - # note: tag_outformat will still have a colon if include_ranks is True - tag_outformat = tag.replace(':', ' ') if use_spaces: tag_outformat = tag_outformat.replace('_', ' ') if use_escape: From 2e28c841f438b2090caac2b9a54eb62ddbda837c Mon Sep 17 00:00:00 2001 From: guaneec Date: Tue, 18 Oct 2022 03:15:41 +0800 Subject: [PATCH 04/30] Oops --- modules/deepbooru.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index de16b13f..8914662d 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -157,6 +157,7 @@ def get_deepbooru_tags_from_model(model, tags, pil_image, threshold, deepbooru_o # sort by reverse by likelihood and normal for alpha, and format tag text as requested 
unsorted_tags_in_theshold.sort(key=lambda y: y[sort_ndx], reverse=(not alpha_sort)) for weight, tag in unsorted_tags_in_theshold: + tag_outformat = tag if use_spaces: tag_outformat = tag_outformat.replace('_', ' ') if use_escape: From 786ed499226177d71e937e0342bcb9d3b1ff260f Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Mon, 17 Oct 2022 19:48:39 +0300 Subject: [PATCH 05/30] use legacy attnblock --- modules/sd_hijack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 984b35c4..2407a461 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -27,7 +27,7 @@ def apply_optimizations(): if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)): print("Applying xformers cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward - ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward + ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward elif cmd_opts.opt_split_attention_v1: print("Applying v1 cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 From 2043c4a231eef838bb15044f502b864b55885037 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Mon, 17 Oct 2022 19:49:11 +0300 Subject: [PATCH 06/30] delete xformers attnblock --- modules/sd_hijack_optimizations.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 79405525..60da7459 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -292,15 +292,3 @@ def cross_attention_attnblock_forward(self, x): return h3 -def xformers_attnblock_forward(self, x): - try: - h_ = x - h_ = self.norm(h_) - q1 = self.q(h_).contiguous() - k1 = self.k(h_).contiguous() - v = self.v(h_).contiguous() - out = xformers.ops.memory_efficient_attention(q1, k1, v) - out = self.proj_out(out) - return x + out - except NotImplementedError: - return cross_attention_attnblock_forward(self, x) From 84823275e896bcc1f7cb4ce098ae3c5d05e17b9a Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Mon, 17 Oct 2022 22:18:59 +0300 Subject: [PATCH 07/30] readd xformers attnblock --- modules/sd_hijack_optimizations.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 60da7459..7ebef3f0 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -292,3 +292,18 @@ def cross_attention_attnblock_forward(self, x): return h3 +def xformers_attnblock_forward(self, x): + try: + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + b, c, h, w = q.shape + q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + out = xformers.ops.memory_efficient_attention(q, k, v) + out = rearrange(out, 'b (h w) c -> b c h w', h=h) + out = self.proj_out(out) + return x + out + except NotImplementedError: + return cross_attention_attnblock_forward(self, x) From 73b5dbf72a93b64445551c74a4c0dc924986081d Mon Sep 17 
00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Mon, 17 Oct 2022 22:19:18 +0300 Subject: [PATCH 08/30] Update sd_hijack.py --- modules/sd_hijack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 2407a461..984b35c4 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -27,7 +27,7 @@ def apply_optimizations(): if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)): print("Applying xformers cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward - ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward + ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward elif cmd_opts.opt_split_attention_v1: print("Applying v1 cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 From c71008c74156635558bb2e877d1628913f6f781e Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Tue, 18 Oct 2022 00:02:50 +0300 Subject: [PATCH 09/30] Update sd_hijack_optimizations.py --- modules/sd_hijack_optimizations.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 7ebef3f0..a3345bb9 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -301,6 +301,9 @@ def xformers_attnblock_forward(self, x): v = self.v(h_) b, c, h, w = q.shape q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() out = xformers.ops.memory_efficient_attention(q, k, v) out = rearrange(out, 'b (h w) c -> b c h w', h=h) out = self.proj_out(out) From 8b02662215917d39f76f86b703a322818d5a8ad4 Mon Sep 17 00:00:00 2001 From: trufty Date: Mon, 17 Oct 2022 10:58:21 -0400 Subject: [PATCH 10/30] Disable auto weights swap with config option --- modules/shared.py | 1 + modules/ui.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/modules/shared.py b/modules/shared.py index 9603d26e..8a1d1881 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -266,6 +266,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. 
Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), + "disable_weights_auto_swap": OptionInfo(False, "Disable auto swapping weights to match model hash in prompts"), "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), "filter_nsfw": OptionInfo(False, "Filter NSFW content"), 'CLIP_stop_at_last_layers': OptionInfo(1, "Stop At last layers of CLIP model", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), diff --git a/modules/ui.py b/modules/ui.py index 1dae4a65..75eb0b0c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -542,6 +542,10 @@ def apply_setting(key, value): if value is None: return gr.update() + # dont allow model to be swapped when model hash exists in prompt + if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap: + return gr.update() + if key == "sd_model_checkpoint": ckpt_info = sd_models.get_closet_checkpoint_match(value) From d2f459c5cf9f728256775dc1c3380c7e9a7e27fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 18 Oct 2022 14:22:52 +0300 Subject: [PATCH 11/30] clarify the comment for the new option from #2959 and move it to UI section. --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 8a1d1881..c0d87168 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -266,7 +266,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. 
Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), - "disable_weights_auto_swap": OptionInfo(False, "Disable auto swapping weights to match model hash in prompts"), "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), "filter_nsfw": OptionInfo(False, "Filter NSFW content"), 'CLIP_stop_at_last_layers': OptionInfo(1, "Stop At last layers of CLIP model", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), @@ -294,6 +293,7 @@ options_templates.update(options_section(('ui', "User interface"), { "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "add_model_name_to_info": OptionInfo(False, "Add model name to generation information"), + "disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), From 7543787a0a7d8a45c44f2293b242e128da7c5a1d Mon Sep 17 00:00:00 2001 From: Justin Maier Date: Mon, 17 Oct 2022 17:11:44 -0600 Subject: [PATCH 12/30] Auto select attention block for editing --- javascript/edit-attention.js | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index 67084e7a..c0d29a74 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -9,9 +9,38 @@ addEventListener('keydown', (event) => { let minus = "ArrowDown" if (event.key != plus && event.key != minus) return; - selectionStart = target.selectionStart; - selectionEnd = target.selectionEnd; - if(selectionStart == selectionEnd) return; + let selectionStart = target.selectionStart; + let selectionEnd = target.selectionEnd; + // If the user hasn't selected anything, let's select their current parenthesis block + if (selectionStart === selectionEnd) { + // Find opening parenthesis around current cursor + const before = target.value.substring(0, selectionStart); + let beforeParen = before.lastIndexOf("("); + if (beforeParen == -1) return; + let beforeParenClose = before.lastIndexOf(")"); + while (beforeParenClose !== -1 && beforeParenClose > beforeParen) { + beforeParen = before.lastIndexOf("(", beforeParen - 1); + beforeParenClose = before.lastIndexOf(")", beforeParenClose - 1); + } + + // Find closing parenthesis around current cursor + const after = target.value.substring(selectionStart); + let afterParen = after.indexOf(")"); + if (afterParen == -1) return; + let afterParenOpen = after.indexOf("("); + while (afterParenOpen !== -1 && afterParen > afterParenOpen) { + afterParen = after.indexOf(")", afterParen + 1); + afterParenOpen = after.indexOf("(", afterParenOpen + 1); + } + if (beforeParen === -1 || afterParen === -1) return; + + // Set the selection to the text between the parenthesis + const parenContent = target.value.substring(beforeParen + 1, selectionStart + afterParen); + const lastColon = parenContent.lastIndexOf(":"); + selectionStart = 
beforeParen + 1; + selectionEnd = selectionStart + lastColon; + target.setSelectionRange(selectionStart, selectionEnd); + } event.preventDefault(); From 428080d469a1b760d7b08a1d81fef77d3d47832a Mon Sep 17 00:00:00 2001 From: camenduru <54370274+camenduru@users.noreply.github.com> Date: Mon, 17 Oct 2022 03:20:11 +0300 Subject: [PATCH 13/30] Remove duplicate artists. --- artists.csv | 2 -- 1 file changed, 2 deletions(-) diff --git a/artists.csv b/artists.csv index 99cdbdc6..858dfcd6 100644 --- a/artists.csv +++ b/artists.csv @@ -738,7 +738,6 @@ Abraham Mignon,0.60605425,fineart Albert Bloch,0.69573116,nudity Charles Dana Gibson,0.67155975,fineart Alexandre-Évariste Fragonard,0.6507174,fineart -Alexandre-Évariste Fragonard,0.6507174,fineart Ernst Fuchs,0.6953538,nudity Alfredo Jaar,0.6952965,digipa-high-impact Judy Chicago,0.6952246,weird @@ -2411,7 +2410,6 @@ Hermann Feierabend,0.5346168,digipa-high-impact Antonio Donghi,0.4610982,digipa-low-impact Adonna Khare,0.4858036,digipa-med-impact James Stokoe,0.5015107,digipa-med-impact -Art & Language,0.5341332,digipa-high-impact Agustín Fernández,0.53403986,fineart Germán Londoño,0.5338712,fineart Emmanuelle Moureaux,0.5335641,digipa-high-impact From 97d3ba3941536215ea15431886c7f28300a9d915 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E3=81=B5=E3=81=81?= <34892635+fa0311@users.noreply.github.com> Date: Tue, 18 Oct 2022 17:29:42 +0900 Subject: [PATCH 14/30] Add scripts to ui-config,json --- modules/scripts.py | 15 +++++++++++++-- modules/ui.py | 5 +++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index ac66d448..3402066d 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -96,6 +96,7 @@ def wrap_call(func, filename, funcname, *args, default=None, **kwargs): class ScriptRunner: def __init__(self): self.scripts = [] + self.titles = [] def setup_ui(self, is_img2img): for script_class, path in scripts_data: @@ -107,9 +108,10 @@ class ScriptRunner: self.scripts.append(script) - titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.scripts] + self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.scripts] - dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index") + dropdown = gr.Dropdown(label="Script", choices=["None"] + self.titles, value="None", type="index") + dropdown.save_to_config = True inputs = [dropdown] for script in self.scripts: @@ -139,6 +141,15 @@ class ScriptRunner: return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))] + def init_field(title): + if title == "None": + return + script_index = self.titles.index(title) + script = self.scripts[script_index] + for i in range(script.args_from, script.args_to): + inputs[i].visible = True + + dropdown.init_field = init_field dropdown.change( fn=select_script, inputs=[dropdown], diff --git a/modules/ui.py b/modules/ui.py index 75eb0b0c..39afbc4e 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1753,6 +1753,11 @@ Requested path was: {f} print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.') else: setattr(obj, field, saved_value) + if getattr(x, 'init_field', False): + try: + x.init_field(saved_value) + except Exception: + print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.') if type(x) in [gr.Slider, 
gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible: apply_field(x, 'visible') From de29ec0743fcfb141d8891a3ccbd537ea71bf5b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E3=81=B5=E3=81=81?= <34892635+fa0311@users.noreply.github.com> Date: Tue, 18 Oct 2022 18:15:00 +0900 Subject: [PATCH 15/30] Remove exception handling --- modules/ui.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 39afbc4e..b38bfb3f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1754,10 +1754,7 @@ Requested path was: {f} else: setattr(obj, field, saved_value) if getattr(x, 'init_field', False): - try: - x.init_field(saved_value) - except Exception: - print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.') + x.init_field(saved_value) if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible: apply_field(x, 'visible') From 3003438088502774628656790d83fc8074d51ab4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E3=81=B5=E3=81=81?= <34892635+fa0311@users.noreply.github.com> Date: Tue, 18 Oct 2022 18:51:57 +0900 Subject: [PATCH 16/30] Add visible for dropdown --- modules/ui.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index b38bfb3f..fb6eb5a0 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1737,7 +1737,7 @@ Requested path was: {f} print(traceback.format_exc(), file=sys.stderr) def loadsave(path, x): - def apply_field(obj, field, condition=None): + def apply_field(obj, field, condition=None, init_field=None): key = path + "/" + field if getattr(obj,'custom_script_source',None) is not None: @@ -1753,8 +1753,8 @@ Requested path was: {f} print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.') else: setattr(obj, field, saved_value) - if getattr(x, 'init_field', False): - x.init_field(saved_value) + if init_field is not None: + init_field(saved_value) if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible: apply_field(x, 'visible') @@ -1780,7 +1780,8 @@ Requested path was: {f} # Since there are many dropdowns that shouldn't be saved, # we only mark dropdowns that should be saved. 
if type(x) == gr.Dropdown and getattr(x, 'save_to_config', False): - apply_field(x, 'value', lambda val: val in x.choices) + apply_field(x, 'value', lambda val: val in x.choices, getattr(x, 'init_field', None)) + apply_field(x, 'visible') visit(txt2img_interface, loadsave, "txt2img") visit(img2img_interface, loadsave, "img2img") From 02622b19191f5f5112db7633c0630e5c7df1b2f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E3=81=B5=E3=81=81?= <34892635+fa0311@users.noreply.github.com> Date: Tue, 18 Oct 2022 18:52:27 +0900 Subject: [PATCH 17/30] update scripts.py --- modules/scripts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/scripts.py b/modules/scripts.py index 3402066d..1039fa9c 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -142,7 +142,7 @@ class ScriptRunner: return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))] def init_field(title): - if title == "None": + if title == 'None': return script_index = self.titles.index(title) script = self.scripts[script_index] From 68e83f40bf143e25639875b27e8ad3e32b382db5 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 16 Oct 2022 10:49:28 +0100 Subject: [PATCH 18/30] add update warning to launch.py --- launch.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/launch.py b/launch.py index 7520cfee..1b133633 100644 --- a/launch.py +++ b/launch.py @@ -5,6 +5,7 @@ import sys import importlib.util import shlex import platform +import requests dir_repos = "repositories" python = sys.executable @@ -126,6 +127,16 @@ def prepare_enviroment(): print(f"Python {sys.version}") print(f"Commit hash: {commit}") + try: + commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json() + if commit != "" and commits['commit']['sha'] != commit: + print("--------------------------------------------------------") + print("| You are not up to date with the most recent release. |") + print("| Consider running `git pull` to update. |") + print("--------------------------------------------------------") + except Exception as e: + pass + if not is_installed("torch") or not is_installed("torchvision"): run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch") From a647cbc618ea13ce2312f0291ebdfc3b9449e5a1 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 16 Oct 2022 10:54:09 +0100 Subject: [PATCH 19/30] move update check to after dep installation --- launch.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/launch.py b/launch.py index 1b133633..8484d7c1 100644 --- a/launch.py +++ b/launch.py @@ -5,7 +5,6 @@ import sys import importlib.util import shlex import platform -import requests dir_repos = "repositories" python = sys.executable @@ -126,16 +125,6 @@ def prepare_enviroment(): print(f"Python {sys.version}") print(f"Commit hash: {commit}") - - try: - commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json() - if commit != "" and commits['commit']['sha'] != commit: - print("--------------------------------------------------------") - print("| You are not up to date with the most recent release. |") - print("| Consider running `git pull` to update. 
|") - print("--------------------------------------------------------") - except Exception as e: - pass if not is_installed("torch") or not is_installed("torchvision"): run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch") @@ -176,6 +165,17 @@ def prepare_enviroment(): sys.argv += args + try: + import requests + commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json() + if commit != "" and commits['commit']['sha'] != commit: + print("--------------------------------------------------------") + print("| You are not up to date with the most recent release. |") + print("| Consider running `git pull` to update. |") + print("--------------------------------------------------------") + except Exception as e: + pass + if "--exit" in args: print("Exiting because of --exit argument") exit(0) From e511b867a9e85e715966c5c60052d67497a4f949 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 16 Oct 2022 17:04:09 +0100 Subject: [PATCH 20/30] Make update check commandline option, give response on all paths. --- launch.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/launch.py b/launch.py index 8484d7c1..9d9dd1b6 100644 --- a/launch.py +++ b/launch.py @@ -86,7 +86,24 @@ def git_clone(url, dir, name, commithash=None): if commithash is not None: run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") + +def version_check(commit): + try: + import requests + commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json() + if commit != "" and commits['commit']['sha'] != commit: + print("--------------------------------------------------------") + print("| You are not up to date with the most recent release. |") + print("| Consider running `git pull` to update. |") + print("--------------------------------------------------------") + elif commits['commit']['sha'] == commit: + print("You are up to date with the most recent release.") + else: + print("Not a git clone, can't perform version check.") + except Exception as e: + print("versipm check failed",e) + def prepare_enviroment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") @@ -165,16 +182,8 @@ def prepare_enviroment(): sys.argv += args - try: - import requests - commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json() - if commit != "" and commits['commit']['sha'] != commit: - print("--------------------------------------------------------") - print("| You are not up to date with the most recent release. |") - print("| Consider running `git pull` to update. 
|") - print("--------------------------------------------------------") - except Exception as e: - pass + if '--update-check' in args: + version_check(commit) if "--exit" in args: print("Exiting because of --exit argument") From 4c605c5174a9b211c3a88e9aff5f5be92b53fd92 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 16 Oct 2022 17:24:06 +0100 Subject: [PATCH 21/30] add shared option for update check --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared.py b/modules/shared.py index c0d87168..50dc46ae 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -76,6 +76,7 @@ parser.add_argument("--disable-console-progressbars", action='store_true', help= parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False) parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None) parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) +parser.add_argument("--update-check", action='store_true', help="enable http check to confirm that the currently running version is the most recent release.", default=False) cmd_opts = parser.parse_args() restricted_opts = [ From 433a7525c1f5eb5963340e0cc45d31038ede3f7e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 18 Oct 2022 15:18:02 +0300 Subject: [PATCH 22/30] remove shared option for update check (because it is not an argument of webui) have launch.py examine both COMMANDLINE_ARGS as well as argv for its arguments --- launch.py | 19 +++++++++---------- modules/shared.py | 1 - 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/launch.py b/launch.py index 9d9dd1b6..7b15e78e 100644 --- a/launch.py +++ b/launch.py @@ -127,13 +127,14 @@ def prepare_enviroment(): codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") - args = shlex.split(commandline_args) + sys.argv += shlex.split(commandline_args) - args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test') - args, reinstall_xformers = extract_arg(args, '--reinstall-xformers') - xformers = '--xformers' in args - deepdanbooru = '--deepdanbooru' in args - ngrok = '--ngrok' in args + sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test') + sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers') + sys.argv, update_check = extract_arg(sys.argv, '--update-check') + xformers = '--xformers' in sys.argv + deepdanbooru = '--deepdanbooru' in sys.argv + ngrok = '--ngrok' in sys.argv try: commit = run(f"{git} rev-parse HEAD").strip() @@ -180,12 +181,10 @@ def prepare_enviroment(): run_pip(f"install -r {requirements_file}", "requirements for Web UI") - sys.argv += args - - if '--update-check' in args: + if update_check: version_check(commit) - if "--exit" in args: + if "--exit" in sys.argv: print("Exiting because of --exit argument") exit(0) diff --git a/modules/shared.py b/modules/shared.py index 50dc46ae..c0d87168 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -76,7 +76,6 @@ parser.add_argument("--disable-console-progressbars", action='store_true', help= parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with 
txt2img and img2img", default=False) parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencoders model', default=None) parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) -parser.add_argument("--update-check", action='store_true', help="enable http check to confirm that the currently running version is the most recent release.", default=False) cmd_opts = parser.parse_args() restricted_opts = [ From 2f448d97a9427f9a7bad19cf608561b2878ab2da Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 17 Oct 2022 23:18:21 +0900 Subject: [PATCH 23/30] styles.csv encoding utf8 to utf-8-sig utf-8-bom for better compatibility for some programs --- modules/styles.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/styles.py b/modules/styles.py index d44dfc1a..3bf5c5b6 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -45,7 +45,7 @@ class StyleDatabase: if not os.path.exists(path): return - with open(path, "r", encoding="utf8", newline='') as file: + with open(path, "r", encoding="utf-8-sig", newline='') as file: reader = csv.DictReader(file) for row in reader: # Support loading old CSV format with "name, text"-columns @@ -79,7 +79,7 @@ class StyleDatabase: def save_styles(self, path: str) -> None: # Write to temporary file first, so we don't nuke the file if something goes wrong fd, temp_path = tempfile.mkstemp(".csv") - with os.fdopen(fd, "w", encoding="utf8", newline='') as file: + with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple, # and collections.NamedTuple has explicit documentation for accessing _fields. 
Same goes for _asdict() writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) From e20b7e30fe17744acb74ad33c87c0963525ea921 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 18 Oct 2022 15:33:24 +0300 Subject: [PATCH 24/30] fix for add difference model merging --- modules/extras.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index c908b43e..03f6085e 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -216,8 +216,11 @@ def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_nam if theta_func1: for key in tqdm.tqdm(theta_1.keys()): if 'model' in key: - t2 = theta_2.get(key, torch.zeros_like(theta_1[key])) - theta_1[key] = theta_func1(theta_1[key], t2) + if key in theta_2: + t2 = theta_2.get(key, torch.zeros_like(theta_1[key])) + theta_1[key] = theta_func1(theta_1[key], t2) + else: + theta_1[key] = 0 del theta_2, teritary_model for key in tqdm.tqdm(theta_0.keys()): From ec1924ee5789b72c31c65932b549c59ccae0cdd6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 18 Oct 2022 16:05:52 +0300 Subject: [PATCH 25/30] additional fix for difference model merging --- modules/extras.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/extras.py b/modules/extras.py index 03f6085e..b853fa5b 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -220,7 +220,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, teritary_model_nam t2 = theta_2.get(key, torch.zeros_like(theta_1[key])) theta_1[key] = theta_func1(theta_1[key], t2) else: - theta_1[key] = 0 + theta_1[key] = torch.zeros_like(theta_1[key]) del theta_2, teritary_model for key in tqdm.tqdm(theta_0.keys()): From cbf15edbf90a68a08eeab40af5df577ba4ac90b6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 18 Oct 2022 17:23:38 +0300 Subject: [PATCH 26/30] remove dependence on TQDM for sampler progress/interrupt functionality --- modules/processing.py | 6 --- modules/sd_samplers.py | 107 ++++++++++++++++++++++------------------- 2 files changed, 58 insertions(+), 55 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index deb6125e..346eea88 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -402,12 +402,6 @@ def process_images(p: StableDiffusionProcessing) -> Processed: with devices.autocast(): samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength) - if state.interrupted or state.skipped: - - # if we are interrupted, sample returns just noise - # use the image collected previously in sampler loop - samples_ddim = shared.state.current_latent - samples_ddim = samples_ddim.to(devices.dtype_vae) x_samples_ddim = decode_first_stage(p.sd_model, samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 20309e06..b58e810b 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -98,25 +98,8 @@ def store_latent(decoded): shared.state.current_image = sample_to_image(decoded) - -def extended_tdqm(sequence, *args, desc=None, **kwargs): - state.sampling_steps = len(sequence) - state.sampling_step = 0 - - seq = sequence if cmd_opts.disable_console_progressbars else tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs) - - for x in seq: - if state.interrupted or state.skipped: - break - - yield x - - 
state.sampling_step += 1 - shared.total_tqdm.update() - - -ldm.models.diffusion.ddim.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs) -ldm.models.diffusion.plms.tqdm = lambda *args, desc=None, **kwargs: extended_tdqm(*args, desc=desc, **kwargs) +class InterruptedException(BaseException): + pass class VanillaStableDiffusionSampler: @@ -128,14 +111,32 @@ class VanillaStableDiffusionSampler: self.init_latent = None self.sampler_noises = None self.step = 0 + self.stop_at = None self.eta = None self.default_eta = 0.0 self.config = None + self.last_latent = None def number_of_needed_noises(self, p): return 0 + def launch_sampling(self, steps, func): + state.sampling_steps = steps + state.sampling_step = 0 + + try: + return func() + except InterruptedException: + return self.last_latent + def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs): + if state.interrupted or state.skipped: + raise InterruptedException + + if self.stop_at is not None and self.step > self.stop_at: + raise InterruptedException + + conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step) @@ -159,11 +160,16 @@ class VanillaStableDiffusionSampler: res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs) if self.mask is not None: - store_latent(self.init_latent * self.mask + self.nmask * res[1]) + self.last_latent = self.init_latent * self.mask + self.nmask * res[1] else: - store_latent(res[1]) + self.last_latent = res[1] + + store_latent(self.last_latent) self.step += 1 + state.sampling_step = self.step + shared.total_tqdm.update() + return res def initialize(self, p): @@ -192,7 +198,7 @@ class VanillaStableDiffusionSampler: self.init_latent = x self.step = 0 - samples = self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning) + samples = self.launch_sampling(steps, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning)) return samples @@ -206,9 +212,9 @@ class VanillaStableDiffusionSampler: # existing code fails with certain step counts, like 9 try: - samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta) + samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) except Exception: - samples_ddim, _ = self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta) + samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) return samples_ddim @@ -223,6 +229,9 @@ class 
CFGDenoiser(torch.nn.Module): self.step = 0 def forward(self, x, sigma, uncond, cond, cond_scale): + if state.interrupted or state.skipped: + raise InterruptedException + conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) @@ -268,25 +277,6 @@ class CFGDenoiser(torch.nn.Module): return denoised -def extended_trange(sampler, count, *args, **kwargs): - state.sampling_steps = count - state.sampling_step = 0 - - seq = range(count) if cmd_opts.disable_console_progressbars else tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs) - - for x in seq: - if state.interrupted or state.skipped: - break - - if sampler.stop_at is not None and x > sampler.stop_at: - break - - yield x - - state.sampling_step += 1 - shared.total_tqdm.update() - - class TorchHijack: def __init__(self, kdiff_sampler): self.kdiff_sampler = kdiff_sampler @@ -314,9 +304,28 @@ class KDiffusionSampler: self.eta = None self.default_eta = 1.0 self.config = None + self.last_latent = None def callback_state(self, d): - store_latent(d["denoised"]) + step = d['i'] + latent = d["denoised"] + store_latent(latent) + self.last_latent = latent + + if self.stop_at is not None and step > self.stop_at: + raise InterruptedException + + state.sampling_step = step + shared.total_tqdm.update() + + def launch_sampling(self, steps, func): + state.sampling_steps = steps + state.sampling_step = 0 + + try: + return func() + except InterruptedException: + return self.last_latent def number_of_needed_noises(self, p): return p.steps @@ -339,9 +348,6 @@ class KDiffusionSampler: self.sampler_noise_index = 0 self.eta = p.eta or opts.eta_ancestral - if hasattr(k_diffusion.sampling, 'trange'): - k_diffusion.sampling.trange = lambda *args, **kwargs: extended_trange(self, *args, **kwargs) - if self.sampler_noises is not None: k_diffusion.sampling.torch = TorchHijack(self) @@ -383,8 +389,9 @@ class KDiffusionSampler: self.model_wrap_cfg.init_latent = x - return self.func(self.model_wrap_cfg, xi, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs) + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)) + return samples def sample(self, p, x, conditioning, unconditional_conditioning, steps=None): steps = steps or p.steps @@ -406,6 +413,8 @@ class KDiffusionSampler: extra_params_kwargs['n'] = steps else: extra_params_kwargs['sigmas'] = sigmas - samples = self.func(self.model_wrap_cfg, x, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs) + + samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={'cond': conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale}, disable=False, callback=self.callback_state, **extra_params_kwargs)) + return samples From bdf1a8903a38e6c29afdbfb6370c40976cdfd99a Mon Sep 17 00:00:00 2001 From: Matthew Clark Date: Tue, 4 Oct 2022 13:44:21 -0400 Subject: [PATCH 27/30] Pass arguments from bash to python --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index 980c0aaf..a9f85d89 100755 --- 
a/webui.sh +++ b/webui.sh @@ -138,4 +138,4 @@ fi printf "\n%s\n" "${delimiter}" printf "Launching launch.py..." printf "\n%s\n" "${delimiter}" -"${python_cmd}" "${LAUNCH_SCRIPT}" +"${python_cmd}" "${LAUNCH_SCRIPT}" "$@" From 82589c2d5e97491b8fc741217800598ba07e2c46 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Mon, 10 Oct 2022 01:52:35 +0100 Subject: [PATCH 28/30] add windows equivalent --- webui.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.bat b/webui.bat index 3f1d03f6..a38a28bb 100644 --- a/webui.bat +++ b/webui.bat @@ -33,7 +33,7 @@ goto :launch :skip_venv :launch -%PYTHON% launch.py +%PYTHON% launch.py %* pause exit /b From b76c9ded4556efa8fa19bcc3e9f442dd33ed9daa Mon Sep 17 00:00:00 2001 From: supersteve3d <39339941+supersteve3d@users.noreply.github.com> Date: Tue, 18 Oct 2022 20:14:49 +0800 Subject: [PATCH 29/30] Update artists.csv --- artists.csv | 2 -- 1 file changed, 2 deletions(-) diff --git a/artists.csv b/artists.csv index 858dfcd6..c54de486 100644 --- a/artists.csv +++ b/artists.csv @@ -523,7 +523,6 @@ Affandi,0.7170285,nudity Diane Arbus,0.655138,digipa-high-impact Joseph Ducreux,0.65247905,digipa-high-impact Berthe Morisot,0.7165984,fineart -Hilma AF Klint,0.71643853,scribbles Hilma af Klint,0.71643853,scribbles Filippino Lippi,0.7163017,fineart Leonid Afremov,0.7163005,fineart @@ -1521,7 +1520,6 @@ Gertrude Harvey,0.5903887,fineart Grant Wood,0.6266253,fineart Fyodor Vasilyev,0.5234919,digipa-med-impact Cagnaccio di San Pietro,0.6261671,fineart -Cagnaccio Di San Pietro,0.6261671,fineart Doris Boulton-Maude,0.62593174,fineart Adolf Hirémy-Hirschl,0.5946784,fineart Harold von Schmidt,0.6256755,fineart From c1093b8051606f0ac90506b7114c4b55d0447c70 Mon Sep 17 00:00:00 2001 From: supersteve3d <39339941+supersteve3d@users.noreply.github.com> Date: Tue, 18 Oct 2022 20:19:12 +0800 Subject: [PATCH 30/30] Update artists.csv --- artists.csv | 3 --- 1 file changed, 3 deletions(-) diff --git a/artists.csv b/artists.csv index c54de486..1a61ed88 100644 --- a/artists.csv +++ b/artists.csv @@ -743,7 +743,6 @@ Judy Chicago,0.6952246,weird Frans van Mieris the Younger,0.6951849,fineart Aertgen van Leyden,0.6951305,fineart Emily Carr,0.69512105,fineart -Frances Macdonald,0.6950408,scribbles Frances MacDonald,0.6950408,scribbles Hannah Höch,0.69495845,scribbles Gillis Rombouts,0.58770025,fineart @@ -893,7 +892,6 @@ Richard McGuire,0.6820089,scribbles Anni Albers,0.65708244,digipa-high-impact Aleksey Savrasov,0.65207493,fineart Wayne Barlowe,0.6537874,fineart -Giorgio De Chirico,0.6815907,fineart Giorgio de Chirico,0.6815907,fineart Ernest Procter,0.6815795,fineart Adriaen Brouwer,0.6815058,fineart @@ -1239,7 +1237,6 @@ Betty Churcher,0.65387225,fineart Claes Corneliszoon Moeyaert,0.65386075,fineart David Bomberg,0.6537477,fineart Abraham Bosschaert,0.6535562,fineart -Giuseppe De Nittis,0.65354455,fineart Giuseppe de Nittis,0.65354455,fineart John La Farge,0.65342575,fineart Frits Thaulow,0.65341854,fineart
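The artists.csv cleanups in patches 13, 29 and 30 all delete rows that duplicate an existing entry, either exactly or with different capitalization (for example "Hilma AF Klint" next to "Hilma af Klint", or "Frances Macdonald" next to "Frances MacDonald"). For anyone auditing the file for further duplicates of this kind, a small illustrative Python sketch (an assumption about how one could check, not the script the contributors used) is to group rows by case-folded artist name and report any group with more than one row:

    import csv
    from collections import defaultdict

    # Group artists.csv rows by case-folded artist name; a group with more than
    # one row is either an exact duplicate or a capitalization variant.
    groups = defaultdict(list)
    with open("artists.csv", newline="", encoding="utf8") as f:
        for row in csv.reader(f):
            if row:
                groups[row[0].casefold()].append(row)

    for name, rows in sorted(groups.items()):
        if len(rows) > 1:
            print(name, rows)

Running this against the file before patches 29 and 30 would flag exactly the pairs those two commits remove.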