ruff auto fixes

AUTOMATIC 2023-05-10 11:05:02 +03:00
parent e42de4b8a2
commit 028d3f6425
22 changed files with 47 additions and 47 deletions

View File

@@ -288,5 +288,5 @@ class VQModelInterface(VQModel):
         dec = self.decoder(quant)
         return dec
-setattr(ldm.models.autoencoder, "VQModel", VQModel)
-setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface)
+ldm.models.autoencoder.VQModel = VQModel
+ldm.models.autoencoder.VQModelInterface = VQModelInterface
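
Note: the rewrite above, replacing setattr with a constant attribute name by a plain assignment, is one of ruff's auto-fixable flake8-bugbear rules (B010). A minimal, self-contained sketch of the same transformation, using a SimpleNamespace stand-in rather than the real ldm module:

    from types import SimpleNamespace

    class VQModel:          # dummy replacement class for the illustration
        pass

    ns = SimpleNamespace()  # stand-in for the ldm.models.autoencoder module

    # Before the fix: setattr with a constant attribute name
    setattr(ns, "VQModel", VQModel)

    # After the fix (B010): direct attribute assignment, identical effect
    ns.VQModel = VQModel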

View File

@@ -1116,7 +1116,7 @@ class LatentDiffusionV1(DDPMV1):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
@@ -1215,7 +1215,7 @@ class LatentDiffusionV1(DDPMV1):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
         return self.p_sample_loop(cond,
@@ -1437,7 +1437,7 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1):
         logs['bbox_image'] = cond_img
         return logs
-setattr(ldm.models.diffusion.ddpm, "DDPMV1", DDPMV1)
-setattr(ldm.models.diffusion.ddpm, "LatentDiffusionV1", LatentDiffusionV1)
-setattr(ldm.models.diffusion.ddpm, "DiffusionWrapperV1", DiffusionWrapperV1)
-setattr(ldm.models.diffusion.ddpm, "Layout2ImgDiffusionV1", Layout2ImgDiffusionV1)
+ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
+ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
+ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
+ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
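
Note: the other recurring change in this file swaps list(map(lambda ...)) for a list comprehension (ruff's unnecessary-map fix, C417). A small sketch with made-up data, independent of the webui code:

    batch_size = 2
    cond = {"c_crossattn": [[10, 11, 12, 13], [20, 21, 22, 23]]}

    # Before: build the truncated lists with map() plus a lambda
    trimmed = {key: list(map(lambda x: x[:batch_size], cond[key])) for key in cond}

    # After: equivalent list comprehension, as in the hunks above
    trimmed = {key: [x[:batch_size] for x in cond[key]] for key in cond}

    print(trimmed)   # {'c_crossattn': [[10, 11], [20, 21]]}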

View File

@@ -172,7 +172,7 @@ def load_lora(name, filename):
             else:
                 print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
                 continue
-            assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
+            raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}")
             with torch.no_grad():
                 module.weight.copy_(weight)
@@ -184,7 +184,7 @@ def load_lora(name, filename):
         elif lora_key == "lora_down.weight":
             lora_module.down = module
         else:
-            assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha'
+            raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")
     if len(keys_failed_to_match) > 0:
         print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")
@@ -202,7 +202,7 @@ def load_loras(names, multipliers=None):
     loaded_loras.clear()
     loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
-    if any([x is None for x in loras_on_disk]):
+    if any(x is None for x in loras_on_disk):
         list_available_loras()
         loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
@@ -309,7 +309,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
         print(f'failed to calculate lora weights for layer {lora_layer_name}')
-    setattr(self, "lora_current_names", wanted_names)
+    self.lora_current_names = wanted_names
 def lora_forward(module, input, original_forward):
@@ -343,8 +343,8 @@ def lora_forward(module, input, original_forward):
 def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
-    setattr(self, "lora_current_names", ())
-    setattr(self, "lora_weights_backup", None)
+    self.lora_current_names = ()
+    self.lora_weights_backup = None
 def lora_Linear_forward(self, input):
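
Note: two more auto-fixable patterns show up in this file: assert False, msg becomes an explicit raise AssertionError(msg), since asserts are stripped under python -O (flake8-bugbear B011), and any([...]) drops the inner list so the check can short-circuit without building a throwaway list (C419). A hedged, self-contained sketch with a made-up lookup table:

    available = {"style_a": "style_a.safetensors"}

    def resolve(names):
        found = [available.get(name) for name in names]
        # Before: any([x is None for x in found])  -- allocates a full list first
        # After:  a generator expression that stops at the first missing entry
        if any(x is None for x in found):
            # Before: assert False, "..."  -- silently skipped when run with -O
            raise AssertionError(f"missing Lora files: {[n for n, x in zip(names, found) if x is None]}")
        return found

    print(resolve(["style_a"]))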

View File

@@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted)
 shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
-    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
+    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(lora.available_loras)}, refresh=lora.list_available_loras),
 }))

View File

@@ -35,7 +35,7 @@ def list_config_states():
         j["filepath"] = path
         config_states.append(j)
-    config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True))
+    config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)
     for cs in config_states:
         timestamp = time.asctime(time.gmtime(cs["created_at"]))

View File

@@ -78,7 +78,7 @@ class DeepDanbooru:
         res = []
-        filtertags = set([x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")])
+        filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")}
         for tag in [x for x in tags if x not in filtertags]:
             probability = probability_dict[tag]
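
Note: the two hunks above show the comprehension-related fixes: list(sorted(...)) loses the redundant list() call, since sorted() already returns a list (C413), and set([... for ...]) becomes a set comprehension (C403). A quick sketch with sample input:

    raw_tags = "long hair, smile , blue eyes"

    # Before: build a list, then hand it to set()
    filtertags = set([x.strip().replace(' ', '_') for x in raw_tags.split(",")])

    # After: a set comprehension builds the set directly
    filtertags = {x.strip().replace(' ', '_') for x in raw_tags.split(",")}

    # Before: ordered = list(sorted(filtertags))  -- sorted() already returns a new list
    ordered = sorted(filtertags)
    print(ordered)   # ['blue_eyes', 'long_hair', 'smile']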

View File

@@ -65,7 +65,7 @@ def enable_tf32():
         # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
         # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
-        if any([torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())]):
+        if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
             torch.backends.cudnn.benchmark = True
         torch.backends.cuda.matmul.allow_tf32 = True

View File

@@ -403,7 +403,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None):
     k = self.to_k(context_k)
     v = self.to_v(context_v)
-    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+    q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
     sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

View File

@@ -5,13 +5,13 @@ import modules.hypernetworks.hypernetwork
 from modules import devices, sd_hijack, shared
 not_available = ["hardswish", "multiheadattention"]
-keys = list(x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
+keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available]
 def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
     filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)
-    return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {filename}", ""
+    return gr.Dropdown.update(choices=sorted(shared.hypernetworks.keys())), f"Created: {filename}", ""
 def train_hypernetwork(*args):

View File

@@ -159,7 +159,7 @@ class InterrogateModels:
         text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
         top_count = min(top_count, len(text_array))
-        text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate)
+        text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate)
         text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
         text_features /= text_features.norm(dim=-1, keepdim=True)
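
Note: several hunks (here and in the UI options below) replace a comprehension that only copies its input, [x for x in xs], with list(xs), and sorted([x for x in xs]) with sorted(xs) (unnecessary-comprehension cleanups, C416 and related rules). A tiny illustration:

    text_array = ("a painting", "a photo", "a sketch")

    # Before: a comprehension that just copies the iterable element by element
    tokens_input = [text for text in text_array]

    # After: list() expresses the copy directly (same result, less noise)
    tokens_input = list(text_array)

    print(sorted(text_array))   # sorted() accepts any iterable, no extra copy needed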

View File

@@ -39,7 +39,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
         if os.path.islink(full_path) and not os.path.exists(full_path):
             print(f"Skipping broken symlink: {full_path}")
             continue
-        if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]):
+        if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist):
             continue
         if full_path not in output:
             output.append(full_path)

View File

@@ -1130,7 +1130,7 @@ class LatentDiffusion(DDPM):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
@@ -1229,7 +1229,7 @@ class LatentDiffusion(DDPM):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
         return self.p_sample_loop(cond,

View File

@@ -17,7 +17,7 @@ class ScriptPostprocessingForMainUI(scripts.Script):
         return self.postprocessing_controls.values()
     def postprocess_image(self, p, script_pp, *args):
-        args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)}
+        args_dict = dict(zip(self.postprocessing_controls, args))
         pp = scripts_postprocessing.PostprocessedImage(script_pp.image)
         pp.info = {}
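
Note: a dict comprehension that merely re-pairs the items from zip() is another case of the unnecessary-comprehension fix; dict(zip(...)) builds the same mapping. Sketch with made-up control names:

    controls = ["upscaler", "scale_by"]
    args = ("Lanczos", 2.0)

    # Before: comprehension that only repackages the zipped pairs
    args_dict = {k: v for k, v in zip(controls, args)}

    # After: dict(zip(...)) -- identical result
    args_dict = dict(zip(controls, args))

    print(args_dict)   # {'upscaler': 'Lanczos', 'scale_by': 2.0}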

View File

@@ -37,7 +37,7 @@ def apply_optimizations():
     optimization_method = None
-    can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp
+    can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp
     if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
         print("Applying xformers cross attention optimization.")

View File

@@ -49,7 +49,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
     v_in = self.to_v(context_v)
     del context, context_k, context_v, x
-    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+    q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in))
     del q_in, k_in, v_in
     dtype = q.dtype
@@ -98,7 +98,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
     del context, x
-    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+    q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in))
     del q_in, k_in, v_in
     r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
@@ -229,7 +229,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None):
     with devices.without_autocast(disable=not shared.opts.upcast_attn):
         k = k * self.scale
-        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+        q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
         r = einsum_op(q, k, v)
     r = r.to(dtype)
     return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h))
@@ -334,7 +334,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
     k_in = self.to_k(context_k)
     v_in = self.to_v(context_v)
-    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
+    q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in))
     del q_in, k_in, v_in
     dtype = q.dtype
@@ -460,7 +460,7 @@ def xformers_attnblock_forward(self, x):
     k = self.k(h_)
     v = self.v(h_)
     b, c, h, w = q.shape
-    q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+    q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
     dtype = q.dtype
     if shared.opts.upcast_attn:
         q, k = q.float(), k.float()
@@ -482,7 +482,7 @@ def sdp_attnblock_forward(self, x):
     k = self.k(h_)
     v = self.v(h_)
     b, c, h, w = q.shape
-    q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+    q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
     dtype = q.dtype
     if shared.opts.upcast_attn:
         q, k = q.float(), k.float()
@@ -506,7 +506,7 @@ def sub_quad_attnblock_forward(self, x):
     k = self.k(h_)
     v = self.v(h_)
     b, c, h, w = q.shape
-    q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
+    q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
     q = q.contiguous()
     k = k.contiguous()
     v = v.contiguous()
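
Note: every hunk in this file is the same unnecessary-map fix (C417) applied to the q/k/v reshaping: map(lambda t: rearrange(...), (q, k, v)) becomes a generator expression unpacked into the three names. The sketch below keeps the shape of the rewrite but substitutes a plain function for einops.rearrange so it runs without torch installed:

    def reshape(t):               # stand-in for rearrange(t, 'b n (h d) -> (b h) n d', h=h)
        return [x * 2 for x in t]

    q_in, k_in, v_in = [1, 2], [3, 4], [5, 6]

    # Before: q, k, v = map(lambda t: reshape(t), (q_in, k_in, v_in))
    # After:  generator expression, unpacked exactly the same way
    q, k, v = (reshape(t) for t in (q_in, k_in, v_in))

    print(q, k, v)   # [2, 4] [6, 8] [10, 12]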

View File

@@ -83,7 +83,7 @@ class VanillaStableDiffusionSampler:
         conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
         unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
-        assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
+        assert all(len(conds) == 1 for conds in conds_list), 'composition via AND is not supported for DDIM/PLMS samplers'
         cond = tensor
         # for DDIM, shapes must match, we can't just process cond and uncond independently;

View File

@@ -86,7 +86,7 @@ class CFGDenoiser(torch.nn.Module):
         conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
         uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
-        assert not is_edit_model or all([len(conds) == 1 for conds in conds_list]), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
+        assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
         batch_size = len(conds_list)
         repeats = [len(conds_list[i]) for i in range(batch_size)]

View File

@@ -381,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
     "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"),
     "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"),
     "extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"),
-    "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
+    "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + list(hypernetworks.keys())}, refresh=reload_hypernetworks),
 }))
 options_templates.update(options_section(('ui', "User interface"), {
@@ -403,7 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), {
     "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
     "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
     "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}),
-    "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}),
+    "hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}),
     "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
     "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"),
     "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
@@ -583,7 +583,7 @@ class Options:
             if item.section not in section_ids:
                 section_ids[item.section] = len(section_ids)
-        self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
+        self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
def cast_value(self, key, value): def cast_value(self, key, value):
"""casts an arbitrary to the same type as this setting's value with key """casts an arbitrary to the same type as this setting's value with key

View File

@@ -167,7 +167,7 @@ class EmbeddingDatabase:
         if 'string_to_param' in data:
             param_dict = data['string_to_param']
             if hasattr(param_dict, '_parameters'):
-                param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
+                param_dict = param_dict._parameters # fix for torch 1.12.1 loading saved file from torch 1.11
             assert len(param_dict) == 1, 'embedding file has multiple terms in it'
             emb = next(iter(param_dict.items()))[1]
         # diffuser concepts
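
Note: this is the getattr counterpart of the earlier setattr fix (flake8-bugbear B009): getattr with a literal attribute name is just attribute access. A small illustrative sketch with a dummy holder object:

    class Holder:
        _parameters = {"emb": [0.1, 0.2]}

    param_dict = Holder()

    # Before: getattr with a constant attribute name
    params = getattr(param_dict, '_parameters')

    # After (B009 fix): direct attribute access, same lookup
    params = param_dict._parameters

    print(params)   # {'emb': [0.1, 0.2]}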

View File

@@ -1222,7 +1222,7 @@ def create_ui():
             )
     def get_textual_inversion_template_names():
-        return sorted([x for x in textual_inversion.textual_inversion_templates])
+        return sorted(textual_inversion.textual_inversion_templates)
     with gr.Tab(label="Train", id="train"):
         gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")
@@ -1230,8 +1230,8 @@ def create_ui():
         train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
         create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
-        train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()])
-        create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name")
+        train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=list(shared.hypernetworks.keys()))
+        create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks.keys())}, "refresh_train_hypernetwork_name")
     with FormRow():
         embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
@@ -1808,7 +1808,7 @@ def create_ui():
     if type(x) == gr.Dropdown:
         def check_dropdown(val):
             if getattr(x, 'multiselect', False):
-                return all([value in x.choices for value in val])
+                return all(value in x.choices for value in val)
             else:
                 return val in x.choices

View File

@@ -26,7 +26,7 @@ def register_page(page):
 def fetch_file(filename: str = ""):
     from starlette.responses import FileResponse
-    if not any([Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs]):
+    if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs):
         raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")
     ext = os.path.splitext(filename)[1].lower()
@@ -326,7 +326,7 @@ def setup_ui(ui, gallery):
         is_allowed = False
         for extra_page in ui.stored_extra_pages:
-            if any([path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()]):
+            if any(path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()):
                 is_allowed = True
                 break

View File

@@ -23,7 +23,7 @@ def register_tmp_file(gradio, filename):
 def check_tmp_file(gradio, filename):
     if hasattr(gradio, 'temp_file_sets'):
-        return any([filename in fileset for fileset in gradio.temp_file_sets])
+        return any(filename in fileset for fileset in gradio.temp_file_sets)
     if hasattr(gradio, 'temp_dirs'):
         return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs)