imports cleanup for ruff

AUTOMATIC 2023-05-10 08:43:42 +03:00
parent 96d6ca4199
commit f741a98bac
48 changed files with 42 additions and 114 deletions
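
Most hunks below are mechanical: they delete imports that ruff reports under rule F401 ("module imported but unused") without otherwise changing the modules. As a rough illustration of the kind of finding involved (a hypothetical file, not taken from this commit):

    # example.py -- illustration only
    import glob   # F401: imported but never referenced below
    import os

    def newest_entry(directory):
        # only os is used here, so ruff flags the glob import as unused
        names = [os.path.join(directory, name) for name in os.listdir(directory)]
        return max(names, key=os.path.getmtime)

With F401 enabled in the configuration, running ruff check . from the repository root lists such violations; the fix, as in the hunks below, is simply to drop the unused import line.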

View File

@@ -1,4 +1,3 @@
import glob
import os
import re
import torch

View File

@@ -13,7 +13,6 @@ import modules.upscaler
from modules import devices, modelloader
from scunet_model_arch import SCUNet as net
from modules.shared import opts
from modules import images
class UpscalerScuNET(modules.upscaler.Upscaler):

View File

@@ -1,4 +1,3 @@
import contextlib
import os
import numpy as np
@@ -8,7 +7,7 @@ from basicsr.utils.download_util import load_file_from_url
from tqdm import tqdm
from modules import modelloader, devices, script_callbacks, shared
from modules.shared import cmd_opts, opts, state
from modules.shared import opts, state
from swinir_model_arch import SwinIR as net
from swinir_model_arch_v2 import Swin2SR as net2
from modules.upscaler import Upscaler, UpscalerData

View File

@@ -1,14 +1,12 @@
# this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py
import math
import numpy as np
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from typing import Optional, List
from typing import Optional
from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock
from basicsr.utils import get_root_logger
from basicsr.utils.registry import ARCH_REGISTRY
def calc_mean_std(feat, eps=1e-5):

View File

@@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut
https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
'''
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from basicsr.utils import get_root_logger
from basicsr.utils.registry import ARCH_REGISTRY

View File

@@ -33,11 +33,9 @@ def setup_model(dirname):
try:
from torchvision.transforms.functional import normalize
from modules.codeformer.codeformer_arch import CodeFormer
from basicsr.utils.download_util import load_file_from_url
from basicsr.utils import imwrite, img2tensor, tensor2img
from basicsr.utils import img2tensor, tensor2img
from facelib.utils.face_restoration_helper import FaceRestoreHelper
from facelib.detection.retinaface import retinaface
from modules.shared import cmd_opts
net_class = CodeFormer

View File

@@ -14,7 +14,7 @@ from collections import OrderedDict
import git
from modules import shared, extensions
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir
from modules.paths_internal import script_path, config_states_dir
all_config_states = OrderedDict()

View File

@@ -6,7 +6,7 @@ from PIL import Image
from basicsr.utils.download_util import load_file_from_url
import modules.esrgan_model_arch as arch
from modules import shared, modelloader, images, devices
from modules import modelloader, images, devices
from modules.upscaler import Upscaler, UpscalerData
from modules.shared import opts

View File

@@ -2,7 +2,6 @@
from collections import OrderedDict
import math
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F

View File

@@ -3,7 +3,6 @@ import sys
import traceback
import time
from datetime import datetime
import git
from modules import shared

View File

@@ -1,15 +1,11 @@
import base64
import html
import io
import math
import os
import re
from pathlib import Path
import gradio as gr
from modules.paths import data_path
from modules import shared, ui_tempdir, script_callbacks
import tempfile
from PIL import Image
re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'

View File

@@ -1,4 +1,3 @@
import csv
import datetime
import glob
import html
@@ -18,7 +17,7 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
from collections import defaultdict, deque
from collections import deque
from statistics import stdev, mean

View File

@@ -1,6 +1,4 @@
import html
import os
import re
import gradio as gr
import modules.hypernetworks.hypernetwork

View File

@@ -19,7 +19,7 @@ import json
import hashlib
from modules import sd_samplers, shared, script_callbacks, errors
from modules.shared import opts, cmd_opts
from modules.shared import opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)

View File

@@ -1,12 +1,9 @@
import math
import os
import sys
import traceback
import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError
from modules import devices, sd_samplers
from modules import sd_samplers
from modules.generation_parameters_copypaste import create_override_settings_dict
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state

View File

@@ -1,6 +1,5 @@
import torch
import platform
from modules import paths
from modules.sd_hijack_utils import CondFunc
from packaging import version

View File

@@ -1,4 +1,3 @@
import glob
import os
import shutil
import importlib

View File

@@ -1,5 +1,4 @@
import torch
import torch.nn.functional as F
import math
from tqdm.auto import trange

View File

@@ -2,7 +2,6 @@ import json
import math
import os
import sys
import warnings
import hashlib
import torch
@@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List
import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared

View File

@@ -3,7 +3,7 @@ from torch.nn.functional import silu
from types import MethodType
import modules.textual_inversion.textual_inversion
from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint
from modules import devices, sd_hijack_optimizations, shared
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr

View File

@@ -1,15 +1,9 @@
import os
import torch
from einops import repeat
from omegaconf import ListConfig
import ldm.models.diffusion.ddpm
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ddim import DDIMSampler, noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding

View File

@@ -1,8 +1,5 @@
import collections
import os.path
import sys
import gc
import time
def should_hijack_ip2p(checkpoint_info):
from modules import sd_models_config

View File

@@ -1,8 +1,6 @@
import open_clip.tokenizer
import torch
from modules import sd_hijack_clip, devices
from modules.shared import opts
class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords):

View File

@@ -565,7 +565,7 @@ def reload_model_weights(sd_model=None, info=None):
def unload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
from modules import devices, sd_hijack
timer = Timer()
if model_data.sd_model:

View File

@@ -1,4 +1,3 @@
import re
import os
import torch

View File

@@ -1,7 +1,6 @@
from collections import deque
import torch
import inspect
import einops
import k_diffusion.sampling
from modules import prompt_parser, devices, sd_samplers_common

View File

@@ -1,8 +1,5 @@
import torch
import safetensors.torch
import os
import collections
from collections import namedtuple
from modules import paths, shared, devices, script_callbacks, sd_models
import glob
from copy import deepcopy

View File

@@ -1,12 +1,9 @@
import argparse
import datetime
import json
import os
import sys
import time
import requests
from PIL import Image
import gradio as gr
import tqdm

View File

@@ -1,18 +1,9 @@
# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime
from __future__ import annotations
import csv
import os
import os.path
import typing
import collections.abc as abc
import tempfile
import shutil
if typing.TYPE_CHECKING:
# Only import this when code is being type-checked, it doesn't have any effect at runtime
from .processing import StableDiffusionProcessing
class PromptStyle(typing.NamedTuple):
name: str

View File

@@ -1,10 +1,8 @@
import cv2
import requests
import os
from collections import defaultdict
from math import log, sqrt
import numpy as np
from PIL import Image, ImageDraw
from PIL import ImageDraw
GREEN = "#0F0"
BLUE = "#00F"

View File

@@ -2,7 +2,7 @@ import base64
import json
import numpy as np
import zlib
from PIL import Image, PngImagePlugin, ImageDraw, ImageFont
from PIL import Image, ImageDraw, ImageFont
from fonts.ttf import Roboto
import torch
from modules.shared import opts

View File

@@ -1,13 +1,9 @@
import os
from PIL import Image, ImageOps
import math
import platform
import sys
import tqdm
import time
from modules import paths, shared, images, deepbooru
from modules.shared import opts, cmd_opts
from modules.textual_inversion import autocrop

View File

@@ -1,7 +1,6 @@
import os
import sys
import traceback
import inspect
from collections import namedtuple
import torch

View File

@@ -1,18 +1,15 @@
import modules.scripts
from modules import sd_samplers
from modules import sd_samplers, processing
from modules.generation_parameters_copypaste import create_override_settings_dict
from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \
StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, cmd_opts
import modules.shared as shared
import modules.processing as processing
from modules.ui import plaintext_to_html
def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args):
override_settings = create_override_settings_dict(override_settings_texts)
p = StableDiffusionProcessingTxt2Img(
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids,
@@ -53,7 +50,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
processed = modules.scripts.scripts_txt2img.run(p, *args)
if processed is None:
processed = process_images(p)
processed = processing.process_images(p)
p.close()

View File

@@ -14,10 +14,10 @@ from PIL import Image, PngImagePlugin
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress
from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path, data_path
from modules.shared import opts, cmd_opts, restricted_opts
from modules.shared import opts, cmd_opts
import modules.codeformer_model
import modules.generation_parameters_copypaste as parameters_copypaste
@@ -28,7 +28,6 @@ import modules.shared as shared
import modules.styles
import modules.textual_inversion.ui
from modules import prompt_parser
from modules.images import save_image
from modules.sd_hijack import model_hijack
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.textual_inversion import textual_inversion

View File

@@ -1,4 +1,3 @@
import glob
import os.path
import urllib.parse
from pathlib import Path

View File

@@ -1,5 +1,5 @@
import gradio as gr
from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue
from modules import scripts, shared, ui_common, postprocessing, call_queue
import modules.generation_parameters_copypaste as parameters_copypaste

View File

@@ -2,8 +2,6 @@ import os
from abc import abstractmethod
import PIL
import numpy as np
import torch
from PIL import Image
import modules.shared

View File

@@ -1,4 +1,4 @@
from transformers import BertPreTrainedModel,BertModel,BertConfig
from transformers import BertPreTrainedModel, BertConfig
import torch.nn as nn
import torch
from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig

View File

@@ -1,10 +1,13 @@
[tool.ruff]
exclude = ["extensions"]
ignore = [
"E501",
"E731",
"E402", # Module level import not at top of file
"F401" # Module imported but unused
"F401", # Module imported but unused
]
exclude = ["extensions"]
[tool.ruff.per-file-ignores]
"webui.py" = ["E402"] # Module level import not at top of file

View File

@@ -4,7 +4,7 @@ import ast
import copy
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
from modules.shared import cmd_opts
def convertExpr2Expression(expr):

View File

@@ -7,9 +7,9 @@ import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw
from modules import images, processing, devices
from modules import images
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
from modules.shared import opts, state
# this function is taken from https://github.com/parlance-zz/g-diffuser-bot

View File

@@ -4,9 +4,9 @@ import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw
from modules import images, processing, devices
from modules import images, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
from modules.shared import opts, state
class Script(scripts.Script):

View File

@@ -1,14 +1,11 @@
import math
from collections import namedtuple
from copy import copy
import random
import modules.scripts as scripts
import gradio as gr
from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
from modules.processing import process_images
from modules.shared import opts, state
import modules.sd_samplers

View File

@@ -1,6 +1,4 @@
import copy
import math
import os
import random
import sys
import traceback
@@ -11,8 +9,7 @@ import gradio as gr
from modules import sd_samplers
from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state
from modules.shared import state
def process_string_tag(tag):

View File

@@ -4,9 +4,9 @@ import modules.scripts as scripts
import gradio as gr
from PIL import Image
from modules import processing, shared, sd_samplers, images, devices
from modules import processing, shared, images, devices
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
from modules.shared import opts, state
class Script(scripts.Script):

View File

@@ -10,15 +10,13 @@ import numpy as np
import modules.scripts as scripts
import gradio as gr
from modules import images, paths, sd_samplers, processing, sd_models, sd_vae
from modules import images, sd_samplers, processing, sd_models, sd_vae
from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
from modules.shared import opts, cmd_opts, state
from modules.shared import opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
import modules.sd_vae
import glob
import os
import re
from modules.ui_components import ToolButton

View File

@@ -43,7 +43,7 @@ if ".dev" in torch.__version__ or "+git" in torch.__version__:
torch.__long_version__ = torch.__version__
torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
import modules.codeformer_model as codeformer
import modules.face_restoration
import modules.gfpgan_model as gfpgan