From dc6779f6f384a83ee2b96e60d841116bd4a878eb Mon Sep 17 00:00:00 2001 From: Dynamic Date: Wed, 26 Oct 2022 19:52:34 +0900 Subject: [PATCH 01/71] Update new strings Translated new strings in PFF UI --- localizations/ko_KR.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json index ff70f1ea..803b7fb9 100644 --- a/localizations/ko_KR.json +++ b/localizations/ko_KR.json @@ -218,6 +218,7 @@ "Interrogate: use artists from artists.csv": "분석 : artists.csv의 작가들 사용하기", "Interrupt": "중단", "Is negative text": "네거티브 텍스트일시 체크", + "Iterate seed every line": "줄마다 시드 반복하기", "Just resize": "리사이징", "Keep -1 for seeds": "시드값 -1로 유지", "keep whatever was there originally": "이미지 원본 유지", @@ -234,6 +235,7 @@ "Leave blank to save images to the default path.": "기존 저장 경로에 이미지들을 저장하려면 비워두세요.", "left": "왼쪽", "linear": "linear", + "List of prompt inputs": "프롬프트 입력 리스트", "List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "설정 탭이 아니라 상단의 빠른 설정 바에 위치시킬 설정 이름을 쉼표로 분리해서 입력하십시오. 설정 이름은 modules/shared.py에서 찾을 수 있습니다. 재시작이 필요합니다.", "LMS": "LMS", "LMS Karras": "LMS Karras", @@ -464,6 +466,7 @@ "uniform": "uniform", "up": "위쪽", "Upload mask": "마스크 업로드하기", + "Upload prompt inputs": "입력할 프롬프트를 업로드하십시오", "Upscale latent space image when doing hires. fix": "고해상도 보정 사용시 잠재 공간 이미지 업스케일하기", "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "마스크된 부분을 설정된 해상도로 업스케일하고, 인페인팅을 진행한 뒤, 다시 다운스케일 후 원본 이미지에 붙여넣습니다.", "Upscaler": "업스케일러", From fddb4883f4a408b3464076465e1b0949ebe0fc30 Mon Sep 17 00:00:00 2001 From: evshiron Date: Wed, 26 Oct 2022 22:33:45 +0800 Subject: [PATCH 02/71] prototype progress api --- modules/api/api.py | 89 ++++++++++++++++++++++++++++++++++++++-------- modules/shared.py | 13 +++++++ 2 files changed, 88 insertions(+), 14 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 6e9d6097..c038f674 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,8 +1,11 @@ +import time + from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.sd_samplers import all_samplers from modules.extras import run_pnginfo import modules.shared as shared +from modules import devices import uvicorn from fastapi import Body, APIRouter, HTTPException from fastapi.responses import JSONResponse @@ -25,6 +28,37 @@ class ImageToImageResponse(BaseModel): parameters: Json info: Json +class ProgressResponse(BaseModel): + progress: float + eta_relative: float + state: Json + +# copy from wrap_gradio_gpu_call of webui.py +# because queue lock will be acquired in api handlers +# and time start needs to be set +# the function has been modified into two parts + +def before_gpu_call(): + devices.torch_gc() + + shared.state.sampling_step = 0 + shared.state.job_count = -1 + shared.state.job_no = 0 + shared.state.job_timestamp = shared.state.get_job_timestamp() + shared.state.current_latent = None + shared.state.current_image = None + shared.state.current_image_sampling_step = 0 + shared.state.skipped = False + shared.state.interrupted = False + shared.state.textinfo = None + shared.state.time_start = time.time() + + +def after_gpu_call(): + shared.state.job = "" + shared.state.job_count = 0 + + 
devices.torch_gc() class Api: def __init__(self, app, queue_lock): @@ -33,6 +67,7 @@ class Api: self.queue_lock = queue_lock self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"]) self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"]) def __base64_to_image(self, base64_string): # if has a comma, deal with prefix @@ -44,12 +79,12 @@ class Api: def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) - + if sampler_index is None: - raise HTTPException(status_code=404, detail="Sampler not found") - + raise HTTPException(status_code=404, detail="Sampler not found") + populate = txt2imgreq.copy(update={ # Override __init__ params - "sd_model": shared.sd_model, + "sd_model": shared.sd_model, "sampler_index": sampler_index[0], "do_not_save_samples": True, "do_not_save_grid": True @@ -57,9 +92,11 @@ class Api: ) p = StableDiffusionProcessingTxt2Img(**vars(populate)) # Override object param + before_gpu_call() with self.queue_lock: processed = process_images(p) - + after_gpu_call() + b64images = [] for i in processed.images: buffer = io.BytesIO() @@ -67,30 +104,30 @@ class Api: b64images.append(base64.b64encode(buffer.getvalue())) return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.js()) - - + + def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI): sampler_index = sampler_to_index(img2imgreq.sampler_index) - + if sampler_index is None: - raise HTTPException(status_code=404, detail="Sampler not found") + raise HTTPException(status_code=404, detail="Sampler not found") init_images = img2imgreq.init_images if init_images is None: - raise HTTPException(status_code=404, detail="Init image not found") + raise HTTPException(status_code=404, detail="Init image not found") mask = img2imgreq.mask if mask: mask = self.__base64_to_image(mask) - + populate = img2imgreq.copy(update={ # Override __init__ params - "sd_model": shared.sd_model, + "sd_model": shared.sd_model, "sampler_index": sampler_index[0], "do_not_save_samples": True, - "do_not_save_grid": True, + "do_not_save_grid": True, "mask": mask } ) @@ -103,9 +140,11 @@ class Api: p.init_images = imgs # Override object param + before_gpu_call() with self.queue_lock: processed = process_images(p) - + after_gpu_call() + b64images = [] for i in processed.images: buffer = io.BytesIO() @@ -118,6 +157,28 @@ class Api: return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=processed.js()) + def progressapi(self): + # copy from check_progress_call of ui.py + + if shared.state.job_count == 0: + return ProgressResponse(progress=0, eta_relative=0, state=shared.state.js()) + + # avoid dividing zero + progress = 0.01 + + if shared.state.job_count > 0: + progress += shared.state.job_no / shared.state.job_count + if shared.state.sampling_steps > 0: + progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps + + time_since_start = time.time() - shared.state.time_start + eta = (time_since_start/progress) + eta_relative = eta-time_since_start + + progress = min(progress, 1) + + return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.js()) + def extrasapi(self): raise NotImplementedError diff --git a/modules/shared.py b/modules/shared.py index 1a9b8289..00f61898 100644 --- a/modules/shared.py +++ 
b/modules/shared.py @@ -146,6 +146,19 @@ class State: def get_job_timestamp(self): return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp? + def js(self): + obj = { + "skipped": self.skipped, + "interrupted": self.skipped, + "job": self.job, + "job_count": self.job_count, + "job_no": self.job_no, + "sampling_step": self.sampling_step, + "sampling_steps": self.sampling_steps, + } + + return json.dumps(obj) + state = State() From 0dd8480281ffa3e58439a3ce059c02d9f3baa5c7 Mon Sep 17 00:00:00 2001 From: MMaker Date: Wed, 26 Oct 2022 11:08:44 -0400 Subject: [PATCH 03/71] fix: Correct before image saved callback --- modules/script_callbacks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 6ea58d61..cedbe7bd 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -69,7 +69,7 @@ def ui_settings_callback(): def before_image_saved_callback(params: ImageSaveParams): - for c in callbacks_image_saved: + for c in callbacks_before_image_saved: try: c.callback(params) except Exception: From 035b875e1a3ce4a925aaba3dd55baea21e765e7f Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 20:14:51 +0900 Subject: [PATCH 04/71] Edit CODEOWNERS for ko_KR.json permissions --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index 935fedcf..fa7b93c7 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1,2 @@ * @AUTOMATIC1111 +localizations/ko_KR.json @36DB \ No newline at end of file From 9b50d9bbbfa61828c318373d5a7f0a6c1f3e273c Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 20:16:51 +0900 Subject: [PATCH 05/71] Update CODEOWNERS file --- CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/CODEOWNERS b/CODEOWNERS index fa7b93c7..19e49bf5 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,3 @@ * @AUTOMATIC1111 +[Localization] localizations/ko_KR.json @36DB \ No newline at end of file From 6a0eebfbee40c4c120c480ca229357cd3a9e69f3 Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 20:21:15 +0900 Subject: [PATCH 06/71] Update 2 cause I'm an idiot --- CODEOWNERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 19e49bf5..d42e6412 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,3 +1,4 @@ * @AUTOMATIC1111 + [Localization] -localizations/ko_KR.json @36DB \ No newline at end of file +/localizations/* @36DB \ No newline at end of file From 34c953381192a2c0167fc1385f91b57b18a603c1 Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 20:23:33 +0900 Subject: [PATCH 07/71] Apparently brackets don't work, gitlab docs fooled me --- CODEOWNERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index d42e6412..12e87aae 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,4 +1,3 @@ * @AUTOMATIC1111 -[Localization] -/localizations/* @36DB \ No newline at end of file +/localizations/ko_KR.json @36DB \ No newline at end of file From 6e10078b2be8e690b5f85619b335e1dcd4fa8a3f Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 22:21:56 +0900 Subject: [PATCH 08/71] Attention editing with hotkeys should work with KR now Added the word "Prompt" in the placeholders to pass the check from edit-attention.js --- localizations/webui.bat | 62 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 localizations/webui.bat diff --git a/localizations/webui.bat b/localizations/webui.bat new file mode 
100644 index 00000000..a38a28bb --- /dev/null +++ b/localizations/webui.bat @@ -0,0 +1,62 @@ +@echo off + +if not defined PYTHON (set PYTHON=python) +if not defined VENV_DIR (set VENV_DIR=venv) + +set ERROR_REPORTING=FALSE + +mkdir tmp 2>NUL + +%PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt +if %ERRORLEVEL% == 0 goto :start_venv +echo Couldn't launch python +goto :show_stdout_stderr + +:start_venv +if [%VENV_DIR%] == [-] goto :skip_venv + +dir %VENV_DIR%\Scripts\Python.exe >tmp/stdout.txt 2>tmp/stderr.txt +if %ERRORLEVEL% == 0 goto :activate_venv + +for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i" +echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME% +%PYTHON_FULLNAME% -m venv %VENV_DIR% >tmp/stdout.txt 2>tmp/stderr.txt +if %ERRORLEVEL% == 0 goto :activate_venv +echo Unable to create venv in directory %VENV_DIR% +goto :show_stdout_stderr + +:activate_venv +set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe" +echo venv %PYTHON% +goto :launch + +:skip_venv + +:launch +%PYTHON% launch.py %* +pause +exit /b + +:show_stdout_stderr + +echo. +echo exit code: %errorlevel% + +for /f %%i in ("tmp\stdout.txt") do set size=%%~zi +if %size% equ 0 goto :show_stderr +echo. +echo stdout: +type tmp\stdout.txt + +:show_stderr +for /f %%i in ("tmp\stderr.txt") do set size=%%~zi +if %size% equ 0 goto :show_stderr +echo. +echo stderr: +type tmp\stderr.txt + +:endofscript + +echo. +echo Launch unsuccessful. Exiting. +pause From 9358a421cfa2ccd866825d8022f93a12268e7dc3 Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 22:24:05 +0900 Subject: [PATCH 09/71] Remove files that shouldn't be here --- localizations/webui.bat | 62 ----------------------------------------- 1 file changed, 62 deletions(-) delete mode 100644 localizations/webui.bat diff --git a/localizations/webui.bat b/localizations/webui.bat deleted file mode 100644 index a38a28bb..00000000 --- a/localizations/webui.bat +++ /dev/null @@ -1,62 +0,0 @@ -@echo off - -if not defined PYTHON (set PYTHON=python) -if not defined VENV_DIR (set VENV_DIR=venv) - -set ERROR_REPORTING=FALSE - -mkdir tmp 2>NUL - -%PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :start_venv -echo Couldn't launch python -goto :show_stdout_stderr - -:start_venv -if [%VENV_DIR%] == [-] goto :skip_venv - -dir %VENV_DIR%\Scripts\Python.exe >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :activate_venv - -for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i" -echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME% -%PYTHON_FULLNAME% -m venv %VENV_DIR% >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :activate_venv -echo Unable to create venv in directory %VENV_DIR% -goto :show_stdout_stderr - -:activate_venv -set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe" -echo venv %PYTHON% -goto :launch - -:skip_venv - -:launch -%PYTHON% launch.py %* -pause -exit /b - -:show_stdout_stderr - -echo. -echo exit code: %errorlevel% - -for /f %%i in ("tmp\stdout.txt") do set size=%%~zi -if %size% equ 0 goto :show_stderr -echo. -echo stdout: -type tmp\stdout.txt - -:show_stderr -for /f %%i in ("tmp\stderr.txt") do set size=%%~zi -if %size% equ 0 goto :show_stderr -echo. -echo stderr: -type tmp\stderr.txt - -:endofscript - -echo. -echo Launch unsuccessful. Exiting. 
-pause From a668444110743cd163474ec563b0e69025dea3d2 Mon Sep 17 00:00:00 2001 From: Dynamic Date: Thu, 27 Oct 2022 22:24:29 +0900 Subject: [PATCH 10/71] Attention editing hotkey fix part 2 --- localizations/ko_KR.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json index 803b7fb9..3d631066 100644 --- a/localizations/ko_KR.json +++ b/localizations/ko_KR.json @@ -263,7 +263,7 @@ "Multiplier (M) - set to 0 to get model A": "배율 (M) - 0으로 적용하면 모델 A를 얻게 됩니다", "Name": "이름", "Negative prompt": "네거티브 프롬프트", - "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "네거티브 프롬프트 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)", + "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "네거티브 프롬프트(Prompt) 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)", "Next batch": "다음 묶음", "Next Page": "다음 페이지", "None": "없음", @@ -321,7 +321,7 @@ "Process images in a directory on the same machine where the server is running.": "WebUI 서버가 돌아가고 있는 디바이스에 존재하는 디렉토리의 이미지들을 처리합니다.", "Produce an image that can be tiled.": "타일링 가능한 이미지를 생성합니다.", "Prompt": "프롬프트", - "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "프롬프트 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)", + "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "프롬프트(Prompt) 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)", "Prompt matrix": "프롬프트 매트릭스", "Prompt order": "프롬프트 순서", "Prompt S/R": "프롬프트 스타일 변경", From 9e465c8aa5616df4c6723bee007ffd3910404f12 Mon Sep 17 00:00:00 2001 From: timntorres Date: Thu, 27 Oct 2022 23:03:34 -0700 Subject: [PATCH 11/71] Add strength to textinfo. --- modules/processing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/processing.py b/modules/processing.py index 4efba946..93066522 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -329,6 +329,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')), "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), + "Hypernetwork strength": (None if shared.loaded_hypernetwork is None else shared.opts.sd_hypernetwork_strength), "Batch size": (None if p.batch_size < 2 else p.batch_size), "Batch pos": (None if p.batch_size < 2 else position_in_batch), "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]), From d4a069a23cb19104b4e58a33d0d1670fadaefb7a Mon Sep 17 00:00:00 2001 From: timntorres Date: Thu, 27 Oct 2022 23:16:27 -0700 Subject: [PATCH 12/71] Read hypernet strength from PNG info. --- modules/ui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ui.py b/modules/ui.py index 0a63e357..62a2f4f3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1812,6 +1812,7 @@ Requested path was: {f} settings_map = { 'sd_hypernetwork': 'Hypernet', + 'sd_hypernetwork_strength': 'Hypernetwork strength', 'CLIP_stop_at_last_layers': 'Clip skip', 'sd_model_checkpoint': 'Model hash', } From c0677b33161f04c3ed1a7a78f4c7288fb95787b7 Mon Sep 17 00:00:00 2001 From: timntorres Date: Thu, 27 Oct 2022 23:31:45 -0700 Subject: [PATCH 13/71] Explicitly state when Hypernet is none. 
--- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 93066522..74a0cd64 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -328,7 +328,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')), - "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), + "Hypernet": ("None" if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), "Hypernetwork strength": (None if shared.loaded_hypernetwork is None else shared.opts.sd_hypernetwork_strength), "Batch size": (None if p.batch_size < 2 else p.batch_size), "Batch pos": (None if p.batch_size < 2 else position_in_batch), From db5a354c489bfd1c95e0bbf9af12ab8b5d6fe170 Mon Sep 17 00:00:00 2001 From: timntorres Date: Fri, 28 Oct 2022 01:41:57 -0700 Subject: [PATCH 14/71] Always ignore "None.pt" in the hypernet directory. --- modules/hypernetworks/hypernetwork.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 8113b35b..cd920df5 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -208,13 +208,16 @@ def list_hypernetworks(path): res = {} for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True): name = os.path.splitext(os.path.basename(filename))[0] - res[name] = filename + # Prevent a hypothetical "None.pt" from being listed. + if name != "None": + res[name] = filename return res def load_hypernetwork(filename): path = shared.hypernetworks.get(filename, None) - if path is not None: + # Prevent any file named "None.pt" from being loaded. + if path is not None and filename != "None": print(f"Loading hypernetwork {filename}") try: shared.loaded_hypernetwork = Hypernetwork() From 2c4d20388425a5e40b93eef3722e42e8d375fbb4 Mon Sep 17 00:00:00 2001 From: timntorres Date: Sat, 29 Oct 2022 00:36:51 -0700 Subject: [PATCH 15/71] Revert "Explicitly state when Hypernet is none." 
--- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 377c0978..04fdda7c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -395,7 +395,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')), - "Hypernet": ("None" if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), + "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name), "Hypernetwork strength": (None if shared.loaded_hypernetwork is None else shared.opts.sd_hypernetwork_strength), "Batch size": (None if p.batch_size < 2 else p.batch_size), "Batch pos": (None if p.batch_size < 2 else position_in_batch), From a5f3adbdd7d9b8245f7782216ac48913660e6bb5 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 15:37:24 +0700 Subject: [PATCH 16/71] Allow trailing comma in learning rate --- modules/textual_inversion/learn_schedule.py | 33 +++++++++++++-------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py index 3a736065..76e611b6 100644 --- a/modules/textual_inversion/learn_schedule.py +++ b/modules/textual_inversion/learn_schedule.py @@ -11,23 +11,30 @@ class LearnScheduleIterator: self.rates = [] self.it = 0 self.maxit = 0 - for i, pair in enumerate(pairs): - tmp = pair.split(':') - if len(tmp) == 2: - step = int(tmp[1]) - if step > cur_step: - self.rates.append((float(tmp[0]), min(step, max_steps))) - self.maxit += 1 - if step > max_steps: + try: + for i, pair in enumerate(pairs): + if not pair.strip(): + continue + tmp = pair.split(':') + if len(tmp) == 2: + step = int(tmp[1]) + if step > cur_step: + self.rates.append((float(tmp[0]), min(step, max_steps))) + self.maxit += 1 + if step > max_steps: + return + elif step == -1: + self.rates.append((float(tmp[0]), max_steps)) + self.maxit += 1 return - elif step == -1: + else: self.rates.append((float(tmp[0]), max_steps)) self.maxit += 1 return - else: - self.rates.append((float(tmp[0]), max_steps)) - self.maxit += 1 - return + assert self.rates + except (ValueError, AssertionError): + raise Exception("Invalid learning rate schedule") + def __iter__(self): return self From ef4c94e1cfe66299227aa95a28c2380d21cb1600 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 15:42:51 +0700 Subject: [PATCH 17/71] Improve lr schedule error message --- modules/textual_inversion/learn_schedule.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py index 76e611b6..dd0c0ad1 100644 --- a/modules/textual_inversion/learn_schedule.py +++ b/modules/textual_inversion/learn_schedule.py @@ -4,7 +4,7 @@ import tqdm class LearnScheduleIterator: def __init__(self, learn_rate, max_steps, cur_step=0): """ - specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, 1e-5:10000 until 10000 + specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have 
lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000 """ pairs = learn_rate.split(',') @@ -33,7 +33,7 @@ class LearnScheduleIterator: return assert self.rates except (ValueError, AssertionError): - raise Exception("Invalid learning rate schedule") + raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') def __iter__(self): From ab27c111d06ec920791c73eea25ad9a61671852e Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 18:09:17 +0700 Subject: [PATCH 18/71] Add input validations before loading dataset for training --- modules/hypernetworks/hypernetwork.py | 38 ++++++++------ .../textual_inversion/textual_inversion.py | 50 ++++++++++++++----- 2 files changed, 59 insertions(+), 29 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 2e84583b..38f35c58 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -332,7 +332,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log # images allows training previews to have infotext. Importing it at the top causes a circular import problem. from modules import images - assert hypernetwork_name, 'hypernetwork not selected' + save_hypernetwork_every = save_hypernetwork_every or 0 + create_image_every = create_image_every or 0 + textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork") path = shared.hypernetworks.get(hypernetwork_name, None) shared.loaded_hypernetwork = Hypernetwork() @@ -358,39 +360,43 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log else: images_dir = None + hypernetwork = shared.loaded_hypernetwork + + ititial_step = hypernetwork.step or 0 + if ititial_step > steps: + shared.state.textinfo = f"Model has already been trained beyond specified max steps" + return hypernetwork, filename + + scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + + # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." 
with torch.autocast("cuda"): ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size) + if unload: shared.sd_model.cond_stage_model.to(devices.cpu) shared.sd_model.first_stage_model.to(devices.cpu) - hypernetwork = shared.loaded_hypernetwork - weights = hypernetwork.weights() - for weight in weights: - weight.requires_grad = True - size = len(ds.indexes) loss_dict = defaultdict(lambda : deque(maxlen = 1024)) losses = torch.zeros((size,)) previous_mean_losses = [0] previous_mean_loss = 0 print("Mean loss of {} elements".format(size)) - - last_saved_file = "" - last_saved_image = "" - forced_filename = "" - - ititial_step = hypernetwork.step or 0 - if ititial_step > steps: - return hypernetwork, filename - - scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + + weights = hypernetwork.weights() + for weight in weights: + weight.requires_grad = True # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc... optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) steps_without_grad = 0 + last_saved_file = "" + last_saved_image = "" + forced_filename = "" + pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) for i, entries in pbar: hypernetwork.step = i + ititial_step diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 17dfb223..44f06443 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -204,9 +204,30 @@ def write_loss(log_directory, filename, step, epoch_len, values): **values, }) +def validate_train_inputs(model_name, learn_rate, batch_size, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"): + assert model_name, f"{name} not selected" + assert learn_rate, "Learning rate is empty or 0" + assert isinstance(batch_size, int), "Batch size must be integer" + assert batch_size > 0, "Batch size must be positive" + assert data_root, "Dataset directory is empty" + assert os.path.isdir(data_root), "Dataset directory doesn't exist" + assert os.listdir(data_root), "Dataset directory is empty" + assert template_file, "Prompt template file is empty" + assert os.path.isfile(template_file), "Prompt template file doesn't exist" + assert steps, "Max steps is empty or 0" + assert isinstance(steps, int), "Max steps must be integer" + assert steps > 0 , "Max steps must be positive" + assert isinstance(save_model_every, int), "Save {name} must be integer" + assert save_model_every >= 0 , "Save {name} must be positive or 0" + assert isinstance(create_image_every, int), "Create image must be integer" + assert create_image_every >= 0 , "Create image must be positive or 0" + if save_model_every or create_image_every: + assert log_directory, "Log directory is empty" def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): - assert embedding_name, 'embedding not selected' + save_embedding_every = save_embedding_every or 0 
+ create_image_every = create_image_every or 0 + validate_train_inputs(embedding_name, learn_rate, batch_size, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding") shared.state.textinfo = "Initializing textual inversion training..." shared.state.job_count = steps @@ -232,17 +253,27 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc os.makedirs(images_embeds_dir, exist_ok=True) else: images_embeds_dir = None - - cond_model = shared.sd_model.cond_stage_model - shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." - with torch.autocast("cuda"): - ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size) + cond_model = shared.sd_model.cond_stage_model hijack = sd_hijack.model_hijack embedding = hijack.embedding_db.word_embeddings[embedding_name] + + ititial_step = embedding.step or 0 + if ititial_step > steps: + shared.state.textinfo = f"Model has already been trained beyond specified max steps" + return embedding, filename + + scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + + # dataset loading may take a while, so input validations and early returns should be done before this + shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." + with torch.autocast("cuda"): + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size) + embedding.vec.requires_grad = True + optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) losses = torch.zeros((32,)) @@ -251,13 +282,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc forced_filename = "" embedding_yet_to_be_embedded = False - ititial_step = embedding.step or 0 - if ititial_step > steps: - return embedding, filename - - scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) - optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) - pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) for i, entries in pbar: embedding.step = i + ititial_step From 3ce2bfdf95bd5f26d0f6e250e67338ada91980d1 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 19:43:21 +0700 Subject: [PATCH 19/71] Add cleanup after training --- modules/hypernetworks/hypernetwork.py | 187 +++++++++--------- .../textual_inversion/textual_inversion.py | 163 +++++++-------- 2 files changed, 182 insertions(+), 168 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 38f35c58..170d5ea4 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -398,110 +398,112 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log forced_filename = "" pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) - for i, entries in pbar: - hypernetwork.step = i + ititial_step - if len(loss_dict) > 0: - previous_mean_losses = [i[-1] for i in loss_dict.values()] - previous_mean_loss = mean(previous_mean_losses) - - scheduler.apply(optimizer, 
hypernetwork.step) - if scheduler.finished: - break - if shared.state.interrupted: - break - - with torch.autocast("cuda"): - c = stack_conds([entry.cond for entry in entries]).to(devices.device) - # c = torch.vstack([entry.cond for entry in entries]).to(devices.device) - x = torch.stack([entry.latent for entry in entries]).to(devices.device) - loss = shared.sd_model(x, c)[0] - del x - del c - - losses[hypernetwork.step % losses.shape[0]] = loss.item() - for entry in entries: - loss_dict[entry.filename].append(loss.item()) + try: + for i, entries in pbar: + hypernetwork.step = i + ititial_step + if len(loss_dict) > 0: + previous_mean_losses = [i[-1] for i in loss_dict.values()] + previous_mean_loss = mean(previous_mean_losses) - optimizer.zero_grad() - weights[0].grad = None - loss.backward() + scheduler.apply(optimizer, hypernetwork.step) + if scheduler.finished: + break - if weights[0].grad is None: - steps_without_grad += 1 + if shared.state.interrupted: + break + + with torch.autocast("cuda"): + c = stack_conds([entry.cond for entry in entries]).to(devices.device) + # c = torch.vstack([entry.cond for entry in entries]).to(devices.device) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] + del x + del c + + losses[hypernetwork.step % losses.shape[0]] = loss.item() + for entry in entries: + loss_dict[entry.filename].append(loss.item()) + + optimizer.zero_grad() + weights[0].grad = None + loss.backward() + + if weights[0].grad is None: + steps_without_grad += 1 + else: + steps_without_grad = 0 + assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' + + optimizer.step() + + steps_done = hypernetwork.step + 1 + + if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): + raise RuntimeError("Loss diverged.") + + if len(previous_mean_losses) > 1: + std = stdev(previous_mean_losses) else: - steps_without_grad = 0 - assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' + std = 0 + dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})" + pbar.set_description(dataset_loss_info) - optimizer.step() + if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: + # Before saving, change name to match current checkpoint. 
+ hypernetwork.name = f'{hypernetwork_name}-{steps_done}' + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') + hypernetwork.save(last_saved_file) - steps_done = hypernetwork.step + 1 + textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { + "loss": f"{previous_mean_loss:.7f}", + "learn_rate": scheduler.learn_rate + }) - if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): - raise RuntimeError("Loss diverged.") - - if len(previous_mean_losses) > 1: - std = stdev(previous_mean_losses) - else: - std = 0 - dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})" - pbar.set_description(dataset_loss_info) + if images_dir is not None and steps_done % create_image_every == 0: + forced_filename = f'{hypernetwork_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) - if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: - # Before saving, change name to match current checkpoint. - hypernetwork.name = f'{hypernetwork_name}-{steps_done}' - last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') - hypernetwork.save(last_saved_file) + optimizer.zero_grad() + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) - textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { - "loss": f"{previous_mean_loss:.7f}", - "learn_rate": scheduler.learn_rate - }) + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + ) - if images_dir is not None and steps_done % create_image_every == 0: - forced_filename = f'{hypernetwork_name}-{steps_done}' - last_saved_image = os.path.join(images_dir, forced_filename) + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 - optimizer.zero_grad() - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) + preview_text = p.prompt - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - ) + processed = processing.process_images(p) + image = processed.images[0] if len(processed.images)>0 else None - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 + if unload: + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) - preview_text = p.prompt + if image is not None: + shared.state.current_image = image + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" - processed = processing.process_images(p) - image = processed.images[0] if len(processed.images)>0 else 
None + shared.state.job_no = hypernetwork.step - if unload: - shared.sd_model.cond_stage_model.to(devices.cpu) - shared.sd_model.first_stage_model.to(devices.cpu) - - if image is not None: - shared.state.current_image = image - last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) - last_saved_image += f", prompt: {preview_text}" - - shared.state.job_no = hypernetwork.step - - shared.state.textinfo = f""" + shared.state.textinfo = f"""
<p>
Loss: {previous_mean_loss:.7f}<br/>
Step: {hypernetwork.step}<br/>
@@ -510,7 +512,14 @@ Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" - + finally: + if weights: + for weight in weights: + weight.requires_grad = False + if unload: + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) + report_statistics(loss_dict) checkpoint = sd_models.select_checkpoint() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 44f06443..fd7f0897 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -283,111 +283,113 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc embedding_yet_to_be_embedded = False pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) - for i, entries in pbar: - embedding.step = i + ititial_step - scheduler.apply(optimizer, embedding.step) - if scheduler.finished: - break + try: + for i, entries in pbar: + embedding.step = i + ititial_step - if shared.state.interrupted: - break + scheduler.apply(optimizer, embedding.step) + if scheduler.finished: + break - with torch.autocast("cuda"): - c = cond_model([entry.cond_text for entry in entries]) - x = torch.stack([entry.latent for entry in entries]).to(devices.device) - loss = shared.sd_model(x, c)[0] - del x + if shared.state.interrupted: + break - losses[embedding.step % losses.shape[0]] = loss.item() + with torch.autocast("cuda"): + c = cond_model([entry.cond_text for entry in entries]) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] + del x - optimizer.zero_grad() - loss.backward() - optimizer.step() + losses[embedding.step % losses.shape[0]] = loss.item() - steps_done = embedding.step + 1 + optimizer.zero_grad() + loss.backward() + optimizer.step() - epoch_num = embedding.step // len(ds) - epoch_step = embedding.step % len(ds) + steps_done = embedding.step + 1 - pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}") + epoch_num = embedding.step // len(ds) + epoch_step = embedding.step % len(ds) - if embedding_dir is not None and steps_done % save_embedding_every == 0: - # Before saving, change name to match current checkpoint. - embedding.name = f'{embedding_name}-{steps_done}' - last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') - embedding.save(last_saved_file) - embedding_yet_to_be_embedded = True + pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}") - write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { - "loss": f"{losses.mean():.7f}", - "learn_rate": scheduler.learn_rate - }) + if embedding_dir is not None and steps_done % save_embedding_every == 0: + # Before saving, change name to match current checkpoint. 
+ embedding.name = f'{embedding_name}-{steps_done}' + last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') + embedding.save(last_saved_file) + embedding_yet_to_be_embedded = True - if images_dir is not None and steps_done % create_image_every == 0: - forced_filename = f'{embedding_name}-{steps_done}' - last_saved_image = os.path.join(images_dir, forced_filename) - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - do_not_reload_embeddings=True, - ) + write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { + "loss": f"{losses.mean():.7f}", + "learn_rate": scheduler.learn_rate + }) - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 - p.width = training_width - p.height = training_height + if images_dir is not None and steps_done % create_image_every == 0: + forced_filename = f'{embedding_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + do_not_reload_embeddings=True, + ) - preview_text = p.prompt + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 + p.width = training_width + p.height = training_height - processed = processing.process_images(p) - image = processed.images[0] + preview_text = p.prompt - shared.state.current_image = image + processed = processing.process_images(p) + image = processed.images[0] - if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded: + shared.state.current_image = image - last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png') + if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded: - info = PngImagePlugin.PngInfo() - data = torch.load(last_saved_file) - info.add_text("sd-ti-embedding", embedding_to_b64(data)) + last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png') - title = "<{}>".format(data.get('name', '???')) + info = PngImagePlugin.PngInfo() + data = torch.load(last_saved_file) + info.add_text("sd-ti-embedding", embedding_to_b64(data)) - try: - vectorSize = list(data['string_to_param'].values())[0].shape[0] - except Exception as e: - vectorSize = '?' + title = "<{}>".format(data.get('name', '???')) - checkpoint = sd_models.select_checkpoint() - footer_left = checkpoint.model_name - footer_mid = '[{}]'.format(checkpoint.hash) - footer_right = '{}v {}s'.format(vectorSize, steps_done) + try: + vectorSize = list(data['string_to_param'].values())[0].shape[0] + except Exception as e: + vectorSize = '?' 
- captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) - captioned_image = insert_image_data_embed(captioned_image, data) + checkpoint = sd_models.select_checkpoint() + footer_left = checkpoint.model_name + footer_mid = '[{}]'.format(checkpoint.hash) + footer_right = '{}v {}s'.format(vectorSize, steps_done) - captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) - embedding_yet_to_be_embedded = False + captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) + captioned_image = insert_image_data_embed(captioned_image, data) - last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) - last_saved_image += f", prompt: {preview_text}" + captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) + embedding_yet_to_be_embedded = False - shared.state.job_no = embedding.step + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" - shared.state.textinfo = f""" + shared.state.job_no = embedding.step + + shared.state.textinfo = f"""
<p>
Loss: {losses.mean():.7f}<br/>
Step: {embedding.step}<br/>
@@ -396,6 +398,9 @@ Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" + finally: + if embedding and embedding.vec is not None: + embedding.vec.requires_grad = False checkpoint = sd_models.select_checkpoint() From a27d19de2eff633b6a39f9f4a5c0f2d6abb81bb5 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 29 Oct 2022 19:44:05 +0700 Subject: [PATCH 20/71] Additional assert on dataset --- modules/textual_inversion/dataset.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 8bb00d27..ad726577 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -42,6 +42,8 @@ class PersonalizedBase(Dataset): self.lines = lines assert data_root, 'dataset directory not specified' + assert os.path.isdir(data_root), "Dataset directory doesn't exist" + assert os.listdir(data_root), "Dataset directory is empty" cond_model = shared.sd_model.cond_stage_model From cbdb5ced767d2f82296ecf22feb262870acba6f3 Mon Sep 17 00:00:00 2001 From: Dynamic Date: Sat, 29 Oct 2022 22:33:51 +0900 Subject: [PATCH 21/71] Add new translations New settings option New extras tab option --- localizations/ko_KR.json | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/localizations/ko_KR.json b/localizations/ko_KR.json index 3d631066..8f5f155b 100644 --- a/localizations/ko_KR.json +++ b/localizations/ko_KR.json @@ -202,6 +202,7 @@ "Inpaint at full resolution padding, pixels": "전체 해상도로 인페인트시 패딩값(픽셀 단위)", "Inpaint masked": "마스크만 처리", "Inpaint not masked": "마스크 이외만 처리", + "Inpainting conditioning mask strength": "인페인팅 조절 마스크 강도", "Input directory": "인풋 이미지 경로", "Input images directory": "이미지 경로 입력", "Interpolation Method": "보간 방법", @@ -276,6 +277,7 @@ "Number of repeats for a single input image per epoch; used only for displaying epoch number": "세대(Epoch)당 단일 인풋 이미지의 반복 횟수 - 세대(Epoch) 숫자를 표시하는 데에만 사용됩니다. ", "Number of rows on the page": "각 페이지마다 표시할 세로줄 수", "Number of vectors per token": "토큰별 벡터 수", + "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "인페인팅 모델에만 적용됩니다. 인페인팅과 이미지→이미지에서 원본 이미지를 얼마나 마스킹 처리할지 결정하는 값입니다. 1.0은 완전히 마스킹함(기본 설정)을 의미하고, 0.0은 완전히 언마스킹된 이미지를 의미합니다. 낮은 값일수록 이미지의 전체적인 구성을 유지하는 데에 도움되겠지만, 변화량이 많을수록 불안정해집니다.", "Open for Clip Aesthetic!": "클립 스타일 기능을 활성화하려면 클릭!", "Open images output directory": "이미지 저장 경로 열기", "Open output directory": "저장 경로 열기", @@ -390,6 +392,7 @@ "Select activation function of hypernetwork": "하이퍼네트워크 활성화 함수 선택", "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "레이어 가중치 초기화 방식 선택 - relu류 : Kaiming 추천, sigmoid류 : Xavier 추천", "Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "WebUI에 표시할 Real-ESRGAN 모델을 선택하십시오. (재시작 필요)", + "Send seed when sending prompt or image to other interface": "다른 화면으로 프롬프트나 이미지를 보낼 때 시드도 함께 보내기", "Send to extras": "부가기능으로 전송", "Send to img2img": "이미지→이미지로 전송", "Send to inpaint": "인페인트로 전송", @@ -467,6 +470,7 @@ "up": "위쪽", "Upload mask": "마스크 업로드하기", "Upload prompt inputs": "입력할 프롬프트를 업로드하십시오", + "Upscale Before Restoring Faces": "얼굴 보정을 진행하기 전에 업스케일링 먼저 진행하기", "Upscale latent space image when doing hires. 
fix": "고해상도 보정 사용시 잠재 공간 이미지 업스케일하기", "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "마스크된 부분을 설정된 해상도로 업스케일하고, 인페인팅을 진행한 뒤, 다시 다운스케일 후 원본 이미지에 붙여넣습니다.", "Upscaler": "업스케일러", From de1dc0d279a877d5d9f512befe30a7d7e5cf3881 Mon Sep 17 00:00:00 2001 From: Martin Cairns <4314538+MartinCairnsSQL@users.noreply.github.com> Date: Sat, 29 Oct 2022 15:23:19 +0100 Subject: [PATCH 22/71] Add adjust_steps_if_invalid to find next valid step for ddim uniform sampler --- modules/sd_samplers.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 3670b57d..aca014e8 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,5 +1,6 @@ from collections import namedtuple import numpy as np +from math import floor import torch import tqdm from PIL import Image @@ -205,17 +206,22 @@ class VanillaStableDiffusionSampler: self.mask = p.mask if hasattr(p, 'mask') else None self.nmask = p.nmask if hasattr(p, 'nmask') else None + + def adjust_steps_if_invalid(self, p, num_steps): + if self.config.name == 'DDIM' and p.ddim_discretize == 'uniform': + valid_step = 999 / (1000 // num_steps) + if valid_step == floor(valid_step): + return int(valid_step) + 1 + + return num_steps + + def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): steps, t_enc = setup_img2img_steps(p, steps) - + steps = self.adjust_steps_if_invalid(p, steps) self.initialize(p) - # existing code fails with certain step counts, like 9 - try: - self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False) - except Exception: - self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False) - + self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False) x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise) self.init_latent = x @@ -239,18 +245,14 @@ class VanillaStableDiffusionSampler: self.last_latent = x self.step = 0 - steps = steps or p.steps + steps = self.adjust_steps_if_invalid(p, steps or p.steps) # Wrap the conditioning models with additional image conditioning for inpainting model if image_conditioning is not None: conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]} unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]} - # existing code fails with certain step counts, like 9 - try: - samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) - except Exception: - samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) + samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, 
unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0]) return samples_ddim From af45b5a11a8a393ed404be3ce0ecac14338155c7 Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:26:28 +0300 Subject: [PATCH 23/71] Testing with API added --- run_tests.bat | 15 +++++++++ test/__init__.py | 0 test/server_poll.py | 17 ++++++++++ test/txt2img_test.py | 80 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 112 insertions(+) create mode 100644 run_tests.bat create mode 100644 test/__init__.py create mode 100644 test/server_poll.py create mode 100644 test/txt2img_test.py diff --git a/run_tests.bat b/run_tests.bat new file mode 100644 index 00000000..3a63f034 --- /dev/null +++ b/run_tests.bat @@ -0,0 +1,15 @@ +@echo off +set ERROR_REPORTING=FALSE +set COMMANDLINE_ARGS= --api +echo Launching SDWebUI... +start "SDWebUITest" webui.bat + +if not defined PYTHON (set PYTHON=python) +if not defined VENV_DIR (set VENV_DIR=venv) +set PYTHON="%~dp0%VENV_DIR%\Scripts\Python.exe" +%PYTHON% test/server_poll.py +for /f "tokens=2 delims=," %%a in ('tasklist /v /fo csv ^| findstr /i "SDWebUITest"') do set "$PID=%%a" + +taskkill /PID %$PID% >nul 2>&1 + +pause diff --git a/test/__init__.py b/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/server_poll.py b/test/server_poll.py new file mode 100644 index 00000000..8c0436f8 --- /dev/null +++ b/test/server_poll.py @@ -0,0 +1,17 @@ +import unittest +import requests +import time + +timeout_threshold = 240 +start_time = time.time() +while time.time()-start_time < timeout_threshold: + try: + requests.head("http://localhost:7860/") + break + except requests.exceptions.ConnectionError: + pass +if time.time()-start_time < timeout_threshold: + suite = unittest.TestLoader().discover('', pattern='*_test.py') + result = unittest.TextTestRunner(verbosity=2).run(suite) +else: + print("Launch unsuccessful") diff --git a/test/txt2img_test.py b/test/txt2img_test.py new file mode 100644 index 00000000..9484fd99 --- /dev/null +++ b/test/txt2img_test.py @@ -0,0 +1,80 @@ +import unittest +import requests +import time + +url_txt2img = "http://localhost:7860/sdapi/v1/txt2img" +simple_txt2img = { + "enable_hr": False, + "denoising_strength": 0, + "firstphase_width": 0, + "firstphase_height": 0, + "prompt": "example prompt", + "styles": [ + "" + ], + "seed": -1, + "subseed": -1, + "subseed_strength": 0, + "seed_resize_from_h": -1, + "seed_resize_from_w": -1, + "batch_size": 1, + "n_iter": 1, + "steps": 5, + "cfg_scale": 7, + "width": 64, + "height": 64, + "restore_faces": False, + "tiling": False, + "negative_prompt": "", + "eta": 0, + "s_churn": 0, + "s_tmax": 0, + "s_tmin": 0, + "s_noise": 1, + "sampler_index": "Euler a" +} + +class TestTxt2ImgWorking(unittest.TestCase): + def test_txt2img_simple_performed(self): + self.assertEqual(requests.post(url_txt2img, json=simple_txt2img).status_code, 200) + + def test_txt2img_with_negative_prompt_performed(self): + params = simple_txt2img.copy() + params["negative_prompt"] = "example negative prompt" + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_not_square_image_performed(self): + params = simple_txt2img.copy() + params["height"] = 128 + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_with_hrfix_performed(self): + params = simple_txt2img.copy() + params["enable_hr"] = True + self.assertEqual(requests.post(url_txt2img, 
json=params).status_code, 200) + + def test_txt2img_with_restore_faces_performed(self): + params = simple_txt2img.copy() + params["restore_faces"] = True + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_with_tiling_faces_performed(self): + params = simple_txt2img.copy() + params["tiling"] = True + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_with_vanilla_sampler_performed(self): + params = simple_txt2img.copy() + params["sampler_index"] = "PLMS" + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + + def test_txt2img_multiple_batches_performed(self): + params = simple_txt2img.copy() + params["n_iter"] = 2 + self.assertEqual(requests.post(url_txt2img, json=params).status_code, 200) + +class TestTxt2ImgCorrectness(unittest.TestCase): + pass + +if __name__ == "__main__": + unittest.main() From 6515dedf57db6a46c7a85b0b7a1eee0daad90123 Mon Sep 17 00:00:00 2001 From: Mackerel Date: Mon, 24 Oct 2022 10:13:25 -0400 Subject: [PATCH 24/71] webui.sh: no automatic git pull --- webui.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/webui.sh b/webui.sh index a9f85d89..cc06f344 100755 --- a/webui.sh +++ b/webui.sh @@ -102,15 +102,14 @@ then exit 1 fi -printf "\n%s\n" "${delimiter}" -printf "Clone or update stable-diffusion-webui" -printf "\n%s\n" "${delimiter}" cd "${install_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/, aborting...\e[0m" "${install_dir}"; exit 1; } if [[ -d "${clone_dir}" ]] then cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; } - "${GIT}" pull else + printf "\n%s\n" "${delimiter}" + printf "Clone stable-diffusion-webui" + printf "\n%s\n" "${delimiter}" "${GIT}" clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git "${clone_dir}" cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; } fi From ab05a74ead9fabb45dd099990e34061c7eb02ca3 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sun, 30 Oct 2022 00:32:02 +0700 Subject: [PATCH 25/71] Revert "Add cleanup after training" This reverts commit 3ce2bfdf95bd5f26d0f6e250e67338ada91980d1. 
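For reference, the cleanup block being reverted wrapped the training loop in
try/finally so that gradient flags and model placement were restored even when
training raised or was interrupted. A condensed, runnable sketch of that shape
(stand-in names, torch only):

    import torch

    weights = [torch.zeros(4, requires_grad=True)]
    try:
        for step in range(3):
            pass  # the training loop body ran here
    finally:
        # this ran even after an exception or interrupt
        for weight in weights:
            weight.requires_grad = False

    assert weights[0].requires_grad is False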
--- modules/hypernetworks/hypernetwork.py | 191 +++++++++--------- .../textual_inversion/textual_inversion.py | 163 ++++++++------- 2 files changed, 170 insertions(+), 184 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 170d5ea4..38f35c58 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -398,112 +398,110 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log forced_filename = "" pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) - - try: - for i, entries in pbar: - hypernetwork.step = i + ititial_step - if len(loss_dict) > 0: - previous_mean_losses = [i[-1] for i in loss_dict.values()] - previous_mean_loss = mean(previous_mean_losses) - - scheduler.apply(optimizer, hypernetwork.step) - if scheduler.finished: - break - - if shared.state.interrupted: - break - - with torch.autocast("cuda"): - c = stack_conds([entry.cond for entry in entries]).to(devices.device) - # c = torch.vstack([entry.cond for entry in entries]).to(devices.device) - x = torch.stack([entry.latent for entry in entries]).to(devices.device) - loss = shared.sd_model(x, c)[0] - del x - del c - - losses[hypernetwork.step % losses.shape[0]] = loss.item() - for entry in entries: - loss_dict[entry.filename].append(loss.item()) - - optimizer.zero_grad() - weights[0].grad = None - loss.backward() - - if weights[0].grad is None: - steps_without_grad += 1 - else: - steps_without_grad = 0 - assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' - - optimizer.step() - - steps_done = hypernetwork.step + 1 - - if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): - raise RuntimeError("Loss diverged.") + for i, entries in pbar: + hypernetwork.step = i + ititial_step + if len(loss_dict) > 0: + previous_mean_losses = [i[-1] for i in loss_dict.values()] + previous_mean_loss = mean(previous_mean_losses) - if len(previous_mean_losses) > 1: - std = stdev(previous_mean_losses) + scheduler.apply(optimizer, hypernetwork.step) + if scheduler.finished: + break + + if shared.state.interrupted: + break + + with torch.autocast("cuda"): + c = stack_conds([entry.cond for entry in entries]).to(devices.device) + # c = torch.vstack([entry.cond for entry in entries]).to(devices.device) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] + del x + del c + + losses[hypernetwork.step % losses.shape[0]] = loss.item() + for entry in entries: + loss_dict[entry.filename].append(loss.item()) + + optimizer.zero_grad() + weights[0].grad = None + loss.backward() + + if weights[0].grad is None: + steps_without_grad += 1 else: - std = 0 - dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})" - pbar.set_description(dataset_loss_info) + steps_without_grad = 0 + assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' - if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: - # Before saving, change name to match current checkpoint. 
- hypernetwork.name = f'{hypernetwork_name}-{steps_done}' - last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') - hypernetwork.save(last_saved_file) + optimizer.step() - textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { - "loss": f"{previous_mean_loss:.7f}", - "learn_rate": scheduler.learn_rate - }) + steps_done = hypernetwork.step + 1 - if images_dir is not None and steps_done % create_image_every == 0: - forced_filename = f'{hypernetwork_name}-{steps_done}' - last_saved_image = os.path.join(images_dir, forced_filename) + if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): + raise RuntimeError("Loss diverged.") + + if len(previous_mean_losses) > 1: + std = stdev(previous_mean_losses) + else: + std = 0 + dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})" + pbar.set_description(dataset_loss_info) - optimizer.zero_grad() - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) + if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: + # Before saving, change name to match current checkpoint. + hypernetwork.name = f'{hypernetwork_name}-{steps_done}' + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') + hypernetwork.save(last_saved_file) - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - ) + textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { + "loss": f"{previous_mean_loss:.7f}", + "learn_rate": scheduler.learn_rate + }) - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 + if images_dir is not None and steps_done % create_image_every == 0: + forced_filename = f'{hypernetwork_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) - preview_text = p.prompt + optimizer.zero_grad() + shared.sd_model.cond_stage_model.to(devices.device) + shared.sd_model.first_stage_model.to(devices.device) - processed = processing.process_images(p) - image = processed.images[0] if len(processed.images)>0 else None + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + ) - if unload: - shared.sd_model.cond_stage_model.to(devices.cpu) - shared.sd_model.first_stage_model.to(devices.cpu) + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 - if image is not None: - shared.state.current_image = image - last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) - last_saved_image += f", prompt: {preview_text}" + preview_text = p.prompt - shared.state.job_no = hypernetwork.step + processed = processing.process_images(p) + 
image = processed.images[0] if len(processed.images)>0 else None - shared.state.textinfo = f""" + if unload: + shared.sd_model.cond_stage_model.to(devices.cpu) + shared.sd_model.first_stage_model.to(devices.cpu) + + if image is not None: + shared.state.current_image = image + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" + + shared.state.job_no = hypernetwork.step + + shared.state.textinfo = f"""

<p>
Loss: {previous_mean_loss:.7f}<br/>
Step: {hypernetwork.step}<br/>
@@ -512,14 +510,7 @@ Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" - finally: - if weights: - for weight in weights: - weight.requires_grad = False - if unload: - shared.sd_model.cond_stage_model.to(devices.device) - shared.sd_model.first_stage_model.to(devices.device) - + report_statistics(loss_dict) checkpoint = sd_models.select_checkpoint() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index fd7f0897..44f06443 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -283,113 +283,111 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc embedding_yet_to_be_embedded = False pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) + for i, entries in pbar: + embedding.step = i + ititial_step - try: - for i, entries in pbar: - embedding.step = i + ititial_step + scheduler.apply(optimizer, embedding.step) + if scheduler.finished: + break - scheduler.apply(optimizer, embedding.step) - if scheduler.finished: - break + if shared.state.interrupted: + break - if shared.state.interrupted: - break + with torch.autocast("cuda"): + c = cond_model([entry.cond_text for entry in entries]) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) + loss = shared.sd_model(x, c)[0] + del x - with torch.autocast("cuda"): - c = cond_model([entry.cond_text for entry in entries]) - x = torch.stack([entry.latent for entry in entries]).to(devices.device) - loss = shared.sd_model(x, c)[0] - del x + losses[embedding.step % losses.shape[0]] = loss.item() - losses[embedding.step % losses.shape[0]] = loss.item() + optimizer.zero_grad() + loss.backward() + optimizer.step() - optimizer.zero_grad() - loss.backward() - optimizer.step() + steps_done = embedding.step + 1 - steps_done = embedding.step + 1 + epoch_num = embedding.step // len(ds) + epoch_step = embedding.step % len(ds) - epoch_num = embedding.step // len(ds) - epoch_step = embedding.step % len(ds) + pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}") - pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}") + if embedding_dir is not None and steps_done % save_embedding_every == 0: + # Before saving, change name to match current checkpoint. + embedding.name = f'{embedding_name}-{steps_done}' + last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') + embedding.save(last_saved_file) + embedding_yet_to_be_embedded = True - if embedding_dir is not None and steps_done % save_embedding_every == 0: - # Before saving, change name to match current checkpoint. 
- embedding.name = f'{embedding_name}-{steps_done}' - last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') - embedding.save(last_saved_file) - embedding_yet_to_be_embedded = True + write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { + "loss": f"{losses.mean():.7f}", + "learn_rate": scheduler.learn_rate + }) - write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { - "loss": f"{losses.mean():.7f}", - "learn_rate": scheduler.learn_rate - }) + if images_dir is not None and steps_done % create_image_every == 0: + forced_filename = f'{embedding_name}-{steps_done}' + last_saved_image = os.path.join(images_dir, forced_filename) + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + do_not_save_grid=True, + do_not_save_samples=True, + do_not_reload_embeddings=True, + ) - if images_dir is not None and steps_done % create_image_every == 0: - forced_filename = f'{embedding_name}-{steps_done}' - last_saved_image = os.path.join(images_dir, forced_filename) - p = processing.StableDiffusionProcessingTxt2Img( - sd_model=shared.sd_model, - do_not_save_grid=True, - do_not_save_samples=True, - do_not_reload_embeddings=True, - ) + if preview_from_txt2img: + p.prompt = preview_prompt + p.negative_prompt = preview_negative_prompt + p.steps = preview_steps + p.sampler_index = preview_sampler_index + p.cfg_scale = preview_cfg_scale + p.seed = preview_seed + p.width = preview_width + p.height = preview_height + else: + p.prompt = entries[0].cond_text + p.steps = 20 + p.width = training_width + p.height = training_height - if preview_from_txt2img: - p.prompt = preview_prompt - p.negative_prompt = preview_negative_prompt - p.steps = preview_steps - p.sampler_index = preview_sampler_index - p.cfg_scale = preview_cfg_scale - p.seed = preview_seed - p.width = preview_width - p.height = preview_height - else: - p.prompt = entries[0].cond_text - p.steps = 20 - p.width = training_width - p.height = training_height + preview_text = p.prompt - preview_text = p.prompt + processed = processing.process_images(p) + image = processed.images[0] - processed = processing.process_images(p) - image = processed.images[0] + shared.state.current_image = image - shared.state.current_image = image + if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded: - if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded: + last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png') - last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png') + info = PngImagePlugin.PngInfo() + data = torch.load(last_saved_file) + info.add_text("sd-ti-embedding", embedding_to_b64(data)) - info = PngImagePlugin.PngInfo() - data = torch.load(last_saved_file) - info.add_text("sd-ti-embedding", embedding_to_b64(data)) + title = "<{}>".format(data.get('name', '???')) - title = "<{}>".format(data.get('name', '???')) + try: + vectorSize = list(data['string_to_param'].values())[0].shape[0] + except Exception as e: + vectorSize = '?' - try: - vectorSize = list(data['string_to_param'].values())[0].shape[0] - except Exception as e: - vectorSize = '?' 
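# A self-contained illustration (assumed filename, dummy payload) of the PIL
# text-chunk mechanism the block above uses to stash the trained embedding
# inside the preview PNG:
#
#     from PIL import Image, PngImagePlugin
#
#     info = PngImagePlugin.PngInfo()
#     info.add_text("sd-ti-embedding", "base64-payload-here")
#     Image.new("RGB", (64, 64)).save("embed_demo.png", "PNG", pnginfo=info)
#     assert Image.open("embed_demo.png").text["sd-ti-embedding"] == "base64-payload-here"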
+ checkpoint = sd_models.select_checkpoint() + footer_left = checkpoint.model_name + footer_mid = '[{}]'.format(checkpoint.hash) + footer_right = '{}v {}s'.format(vectorSize, steps_done) - checkpoint = sd_models.select_checkpoint() - footer_left = checkpoint.model_name - footer_mid = '[{}]'.format(checkpoint.hash) - footer_right = '{}v {}s'.format(vectorSize, steps_done) + captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) + captioned_image = insert_image_data_embed(captioned_image, data) - captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right) - captioned_image = insert_image_data_embed(captioned_image, data) + captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) + embedding_yet_to_be_embedded = False - captioned_image.save(last_saved_image_chunks, "PNG", pnginfo=info) - embedding_yet_to_be_embedded = False + last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) + last_saved_image += f", prompt: {preview_text}" - last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False) - last_saved_image += f", prompt: {preview_text}" + shared.state.job_no = embedding.step - shared.state.job_no = embedding.step - - shared.state.textinfo = f""" + shared.state.textinfo = f"""

<p>
Loss: {losses.mean():.7f}<br/>
Step: {embedding.step}<br/>
@@ -398,9 +396,6 @@ Last saved embedding: {html.escape(last_saved_file)}<br/>
Last saved image: {html.escape(last_saved_image)}<br/>
</p>
""" - finally: - if embedding and embedding.vec is not None: - embedding.vec.requires_grad = False checkpoint = sd_models.select_checkpoint() From a07f054c86f33360ff620d6a3fffdee366ab2d99 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sun, 30 Oct 2022 00:49:29 +0700 Subject: [PATCH 26/71] Add missing info on hypernetwork/embedding model log Mentioned here: https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/1528#discussioncomment-3991513 Also group the saving into one --- modules/hypernetworks/hypernetwork.py | 31 ++++++++++----- .../textual_inversion/textual_inversion.py | 39 ++++++++++++------- 2 files changed, 47 insertions(+), 23 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 38f35c58..86daf825 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -361,6 +361,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log images_dir = None hypernetwork = shared.loaded_hypernetwork + checkpoint = sd_models.select_checkpoint() ititial_step = hypernetwork.step or 0 if ititial_step > steps: @@ -449,9 +450,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0: # Before saving, change name to match current checkpoint. - hypernetwork.name = f'{hypernetwork_name}-{steps_done}' - last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt') - hypernetwork.save(last_saved_file) + hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}' + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt') + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file) textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { "loss": f"{previous_mean_loss:.7f}", @@ -512,13 +513,23 @@ Last saved image: {html.escape(last_saved_image)}
""" report_statistics(loss_dict) - checkpoint = sd_models.select_checkpoint() - hypernetwork.sd_checkpoint = checkpoint.hash - hypernetwork.sd_checkpoint_name = checkpoint.model_name - # Before saving for the last time, change name back to the base name (as opposed to the save_hypernetwork_every step-suffixed naming convention). - hypernetwork.name = hypernetwork_name - filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork.name}.pt') - hypernetwork.save(filename) + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename) return hypernetwork, filename + +def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename): + old_hypernetwork_name = hypernetwork.name + old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None + old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None + try: + hypernetwork.sd_checkpoint = checkpoint.hash + hypernetwork.sd_checkpoint_name = checkpoint.model_name + hypernetwork.name = hypernetwork_name + hypernetwork.save(filename) + except: + hypernetwork.sd_checkpoint = old_sd_checkpoint + hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name + hypernetwork.name = old_hypernetwork_name + raise diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 44f06443..ee9917ce 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -119,7 +119,7 @@ class EmbeddingDatabase: vec = emb.detach().to(devices.device, dtype=torch.float32) embedding = Embedding(vec, name) embedding.step = data.get('step', None) - embedding.sd_checkpoint = data.get('hash', None) + embedding.sd_checkpoint = data.get('sd_checkpoint', None) embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None) self.register_embedding(embedding, shared.sd_model) @@ -259,6 +259,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc hijack = sd_hijack.model_hijack embedding = hijack.embedding_db.word_embeddings[embedding_name] + checkpoint = sd_models.select_checkpoint() ititial_step = embedding.step or 0 if ititial_step > steps: @@ -314,9 +315,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc if embedding_dir is not None and steps_done % save_embedding_every == 0: # Before saving, change name to match current checkpoint. - embedding.name = f'{embedding_name}-{steps_done}' - last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt') - embedding.save(last_saved_file) + embedding_name_every = f'{embedding_name}-{steps_done}' + last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt') + save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True) embedding_yet_to_be_embedded = True write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { @@ -397,14 +398,26 @@ Last saved image: {html.escape(last_saved_image)}

""" - checkpoint = sd_models.select_checkpoint() - - embedding.sd_checkpoint = checkpoint.hash - embedding.sd_checkpoint_name = checkpoint.model_name - embedding.cached_checksum = None - # Before saving for the last time, change name back to base name (as opposed to the save_embedding_every step-suffixed naming convention). - embedding.name = embedding_name - filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding.name}.pt') - embedding.save(filename) + filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') + save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True) return embedding, filename + +def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True): + old_embedding_name = embedding.name + old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None + old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None + old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None + try: + embedding.sd_checkpoint = checkpoint.hash + embedding.sd_checkpoint_name = checkpoint.model_name + if remove_cached_checksum: + embedding.cached_checksum = None + embedding.name = embedding_name + embedding.save(filename) + except: + embedding.sd_checkpoint = old_sd_checkpoint + embedding.sd_checkpoint_name = old_sd_checkpoint_name + embedding.name = old_embedding_name + embedding.cached_checksum = old_cached_checksum + raise From 3d58510f214c645ce5cdb261aa47df6573b239e9 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sun, 30 Oct 2022 00:54:59 +0700 Subject: [PATCH 27/71] Fix dataset still being loaded even when training will be skipped --- modules/hypernetworks/hypernetwork.py | 2 +- modules/textual_inversion/textual_inversion.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 86daf825..07acadc9 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -364,7 +364,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log checkpoint = sd_models.select_checkpoint() ititial_step = hypernetwork.step or 0 - if ititial_step > steps: + if ititial_step >= steps: shared.state.textinfo = f"Model has already been trained beyond specified max steps" return hypernetwork, filename diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index ee9917ce..e0babb46 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -262,7 +262,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc checkpoint = sd_models.select_checkpoint() ititial_step = embedding.step or 0 - if ititial_step > steps: + if ititial_step >= steps: shared.state.textinfo = f"Model has already been trained beyond specified max steps" return embedding, filename From 2f3d8172c3fe80ea64ebe5c797835ca15c2e595f Mon Sep 17 00:00:00 2001 From: Vladimir Repin <32306715+mezotaken@users.noreply.github.com> Date: Sat, 29 Oct 2022 21:43:32 +0300 Subject: [PATCH 28/71] img2img test template and setUp added --- test/extras_test.py | 0 test/img2img_test.py | 59 +++++++++++++++++ test/test_files/img2img_basic.png | Bin 0 -> 9932 bytes test/test_files/mask_basic.png | Bin 0 -> 362 bytes test/txt2img_test.py | 102 
++++++++++++++---------------- 5 files changed, 106 insertions(+), 55 deletions(-) create mode 100644 test/extras_test.py create mode 100644 test/img2img_test.py create mode 100644 test/test_files/img2img_basic.png create mode 100644 test/test_files/mask_basic.png diff --git a/test/extras_test.py b/test/extras_test.py new file mode 100644 index 00000000..e69de29b diff --git a/test/img2img_test.py b/test/img2img_test.py new file mode 100644 index 00000000..d8ed309d --- /dev/null +++ b/test/img2img_test.py @@ -0,0 +1,59 @@ +import unittest +import requests +from gradio.processing_utils import encode_pil_to_base64 +from PIL import Image + +class Img2ImgWorking(unittest.TestCase): + def setUp(self): + self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" + self.simple_img2img = { + "init_images": [ + encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")) + ], + "resize_mode": 0, + "denoising_strength": 0.75, + "mask": None, + "mask_blur": 4, + "inpainting_fill": 0, + "inpaint_full_res": False, + "inpaint_full_res_padding": 0, + "inpainting_mask_invert": 0, + "prompt": "example prompt", + "styles": [ + "" + ], + "seed": -1, + "subseed": -1, + "subseed_strength": 0, + "seed_resize_from_h": -1, + "seed_resize_from_w": -1, + "batch_size": 1, + "n_iter": 1, + "steps": 3, + "cfg_scale": 7, + "width": 64, + "height": 64, + "restore_faces": False, + "tiling": False, + "negative_prompt": "", + "eta": 0, + "s_churn": 0, + "s_tmax": 0, + "s_tmin": 0, + "s_noise": 1, + "override_settings": {}, + "sampler_index": "Euler a", + "include_init_images": False + } + def test_img2img_simple_performed(self): + self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) + + def test_inpainting_masked_performed(self): + self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png")) + self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200) + +class TestImg2ImgCorrectness(unittest.TestCase): + pass + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/test/test_files/img2img_basic.png b/test/test_files/img2img_basic.png new file mode 100644 index 0000000000000000000000000000000000000000..49a420482d0a70b9f5986d776a66cb3ea39d1a97 GIT binary patch literal 9932 zcmV;-CNtTIP)Px#1ZP1_K>z@;j|==^1poj532;bRa{vGi!~g&e!~vBn4jTXf02XvbSaefwW^{L9 za%BK#X=XBTZf77eE;KGOqh3c2001UGNklH_r=gz~MC(X~YJV>_j z0N5CuKm%P6%#>wTlQvx~4yA2Eikl8$B~7W;YFA5}(u9?)5axCfLJ6ca1``j&#=}q^&cjq(o_uuD=^gi8trtf@v-uu75bCmnaXI@QHOD&f({hEF) z%~`-U;pp-wU_ip9Y~L(qtI>g<@w@z!V^8zsm^G(?nvQRdf!Tb2JAW#od~$xX-{sqc zO^z`>mk-JI^-rH{WOkrAoR*)@zM_-M1p3)WyUvbwhJ*dyFaP9}-yU+(@8N^P(~PVp^+4d)qN$oc{D^;Tr@pfJW$x`fdseO+D7 zgtV%OvYRx{KM>RqXM|H}#+ToGS@Syo|GTGaj(-UQ{EuVJp}BopU1qR`$#ZmGb43W~ z?1%`nT@D|O(cB@5l<>M)bBdzo)4e_!w`tzK_J>N>Oi~s~fry(&H?v{ix4PM<(k8Q- zgwbhzT{yO4Qh&-3I@Wv&2f2MBR^5HM#tmY^=Yxhq?q^nJn<*ljTMCzIvTb%Q3xRA* z#CWI6SI3!h7|Ud`aw-p!&}czk>~{|7x!^kFSYJGV9lzjYhJ$cnF6!EV38YO;Ghh}@ zJ7M>np*JK5r`-fgr9r`RrJ@xIIh!bSZM2kHrIg35IiCw*QgdkhP&j@9X;V9*;&bz1 zoKA^J4ZO^}8%D<*$XKob|7L=^BG~b8;Oq0Si8WpVI~_i3+&jPW1XX|wn%HmPiX1|D zL9}&a$i)EFDsG)0jpt8{rr z(>p8<$E_y{5Rc$q^D3O28t24=Xn0x z*?#uPu4!EED?fS_Pq}|(ORdDSn<@0wsGNt zH8=2~D6vW&hQp$DAQ&Qc%>LMiK4Q6@`z?+mcTrK22e`Wr=XPNvivME_g!JcOz_siy zl*F+-RUs!&)p^~Ug=$vW2HF$zd=$p13y>Xi$#P%911?h@$~x2(gMCCck%ss=9~8J( z6iV3Xnfq+(&<|~+o3ma&v2s4Jsqvx}xhX7^tdAGSnla(A+}yI!fBHKXHYNy{$j^z2 z_>miCRwSdu%E?sI7>w(Qrmu+sQ>hT`&Ro0b@_XY#X 
ze2!#XLCA_iOWrO|Aw1+AUN)7($VIi?V^-Wb;`yn(8d>>5*Zv>Rg@Y<8a-i)10000< KMNUMnLSTXg;ov#| literal 0 HcmV?d00001 diff --git a/test/test_files/mask_basic.png b/test/test_files/mask_basic.png new file mode 100644 index 0000000000000000000000000000000000000000..0c2e9a6899e5c0381ce7c7364b31d684464ab423 GIT binary patch literal 362 zcmeAS@N?(olHy`uVBq!ia0vp^4j|0I1SD0tpLGH$#^NA%Cx&(BWL^R}Ea{HEjtmSN z`?>!lvI6;>1s;*b3=DjSL74G){)!Z!;D1jS$B+!?x96kznhkhd0>ACglv+8xYZYf@ zg7GSo=i*7f^M21SZkWY22=JGmJVkbv-kp#gwEW^`mya*CxhPGhwnXiGR`q; zo_<(}c}Ira`{OCB1*f%^@py1P=&GJ^RapMOKMtQgA~FY_S)9o&KEP?c*l_PbMs=N@ zV_(`fFl>98;NHv3x8~HrJ}GvYh^fu8rhFC~wAih)4l!|Ub4{*lX7-Da=zYmMk&)H< zvBCDUjP^HV8at;yV&?jG|L~f5QyQAI!wgd$!W<6jey*@rGhFask>nnkY0@8#W!nC+ rec9Q-ulM)Z_RxN|lmdu{w!YU_dMY9qnsK!N7!(Yiu6{1-oD!M Date: Sat, 29 Oct 2022 21:50:06 +0300 Subject: [PATCH 29/71] extras test template added --- test/extras_test.py | 29 +++++++++++++++++++++++++++++ test/img2img_test.py | 4 ++-- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/test/extras_test.py b/test/extras_test.py index e69de29b..2e1764d9 100644 --- a/test/extras_test.py +++ b/test/extras_test.py @@ -0,0 +1,29 @@ +import unittest +import requests +from gradio.processing_utils import encode_pil_to_base64 +from PIL import Image + +class TestExtrasWorking(unittest.TestCase): + def setUp(self): + self.url_img2img = "http://localhost:7860/sdapi/v1/extra-single-image" + self.simple_extras = { + "resize_mode": 0, + "show_extras_results": True, + "gfpgan_visibility": 0, + "codeformer_visibility": 0, + "codeformer_weight": 0, + "upscaling_resize": 2, + "upscaling_resize_w": 512, + "upscaling_resize_h": 512, + "upscaling_crop": True, + "upscaler_1": "None", + "upscaler_2": "None", + "extras_upscaler_2_visibility": 0, + "image": "" + } + +class TestExtrasCorrectness(unittest.TestCase): + pass + +if __name__ == "__main__": + unittest.main() diff --git a/test/img2img_test.py b/test/img2img_test.py index d8ed309d..61e3e285 100644 --- a/test/img2img_test.py +++ b/test/img2img_test.py @@ -3,7 +3,7 @@ import requests from gradio.processing_utils import encode_pil_to_base64 from PIL import Image -class Img2ImgWorking(unittest.TestCase): +class TestImg2ImgWorking(unittest.TestCase): def setUp(self): self.url_img2img = "http://localhost:7860/sdapi/v1/img2img" self.simple_img2img = { @@ -56,4 +56,4 @@ class TestImg2ImgCorrectness(unittest.TestCase): pass if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() From 4609b83cd496013a05e77c42af031d89f07785a9 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sat, 29 Oct 2022 16:09:19 -0300 Subject: [PATCH 30/71] Add PNG Info endpoint --- modules/api/api.py | 12 +++++++++--- modules/api/models.py | 9 ++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 49c213ea..8fcd068d 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -5,7 +5,7 @@ import modules.shared as shared from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.sd_samplers import all_samplers -from modules.extras import run_extras +from modules.extras import run_extras, run_pnginfo def upscaler_to_index(name: str): try: @@ -32,6 +32,7 @@ class Api: self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], 
response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) + self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -125,8 +126,13 @@ class Api: return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) - def pnginfoapi(self): - raise NotImplementedError + def pnginfoapi(self, req:PNGInfoRequest): + if(not req.image.strip()): + return PNGInfoResponse(info="") + + result = run_pnginfo(decode_base64_to_image(req.image.strip())) + + return PNGInfoResponse(info=result[1]) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index dd122321..58e8e58b 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,4 +1,5 @@ import inspect +from click import prompt from pydantic import BaseModel, Field, create_model from typing import Any, Optional from typing_extensions import Literal @@ -148,4 +149,10 @@ class ExtrasBatchImagesRequest(ExtrasBaseRequest): imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings") class ExtrasBatchImagesResponse(ExtraBaseResponse): - images: list[str] = Field(title="Images", description="The generated images in base64 format.") \ No newline at end of file + images: list[str] = Field(title="Images", description="The generated images in base64 format.") + +class PNGInfoRequest(BaseModel): + image: str = Field(title="Image", description="The base64 encoded PNG image") + +class PNGInfoResponse(BaseModel): + info: str = Field(title="Image info", description="A string with all the info the image had") \ No newline at end of file From 83a1f44ae26cb89492064bb8be0321b14a75efe4 Mon Sep 17 00:00:00 2001 From: Bruno Seoane Date: Sat, 29 Oct 2022 16:10:00 -0300 Subject: [PATCH 31/71] Fix space --- modules/api/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index 8fcd068d..d0f488ca 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -126,7 +126,7 @@ class Api: return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1]) - def pnginfoapi(self, req:PNGInfoRequest): + def pnginfoapi(self, req: PNGInfoRequest): if(not req.image.strip()): return PNGInfoResponse(info="") From 9bb6b6509aff8c1e6546d5a798ef9e9922758dc4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Oct 2022 22:20:02 +0300 Subject: [PATCH 32/71] add postprocess call for scripts --- modules/processing.py | 12 +++++++++--- modules/scripts.py | 24 +++++++++++++++++++++--- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 548eec29..50343846 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -478,7 +478,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: model_hijack.embedding_db.load_textual_inversion_embeddings() if p.scripts is not None: - p.scripts.run_alwayson_scripts(p) + p.scripts.process(p) infotexts = [] output_images = [] @@ -501,7 +501,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] 
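# A hedged usage sketch of the new hook pair: assuming the usual auto-discovery
# of files under scripts/, an always-on script can now see both the inputs
# (process) and the finished Processed object (postprocess):
#
#     import modules.scripts as scripts
#
#     class LogResults(scripts.Script):
#         def title(self):
#             return "Log results"
#
#         def show(self, is_img2img):
#             return scripts.AlwaysVisible
#
#         def ui(self, is_img2img):
#             return []
#
#         def postprocess(self, p, processed, *args):
#             print(f"job finished with {len(processed.images)} image(s)")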
subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] - if (len(prompts) == 0): + if len(prompts) == 0: break with devices.autocast(): @@ -590,7 +590,13 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True) devices.torch_gc() - return Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], all_prompts=p.all_prompts, all_seeds=p.all_seeds, all_subseeds=p.all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts) + + res = Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], all_prompts=p.all_prompts, all_seeds=p.all_seeds, all_subseeds=p.all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts) + + if p.scripts is not None: + p.scripts.postprocess(p, res) + + return res class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): diff --git a/modules/scripts.py b/modules/scripts.py index a7f36012..96e44bfd 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -64,7 +64,16 @@ class Script: def process(self, p, *args): """ This function is called before processing begins for AlwaysVisible scripts. - scripts. You can modify the processing object (p) here, inject hooks, etc. + You can modify the processing object (p) here, inject hooks, etc. + args contains all values returned by components from ui() + """ + + pass + + def postprocess(self, p, processed, *args): + """ + This function is called after processing ends for AlwaysVisible scripts. + args contains all values returned by components from ui() """ pass @@ -289,13 +298,22 @@ class ScriptRunner: return processed - def run_alwayson_scripts(self, p): + def process(self, p): for script in self.alwayson_scripts: try: script_args = p.script_args[script.args_from:script.args_to] script.process(p, *script_args) except Exception: - print(f"Error running alwayson script: {script.filename}", file=sys.stderr) + print(f"Error running process: {script.filename}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + def postprocess(self, p, processed): + for script in self.alwayson_scripts: + try: + script_args = p.script_args[script.args_from:script.args_to] + script.postprocess(p, processed, *script_args) + except Exception: + print(f"Error running postprocess: {script.filename}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) def reload_sources(self, cache): From 4cb5983c308cb8a4940b00babc2cf6fc9261692f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Oct 2022 22:38:50 +0300 Subject: [PATCH 33/71] rename french translation to be in line with others --- localizations/{fr-FR.json => fr_FR.json} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename localizations/{fr-FR.json => fr_FR.json} (100%) diff --git a/localizations/fr-FR.json b/localizations/fr_FR.json similarity index 100% rename from localizations/fr-FR.json rename to localizations/fr_FR.json From d699720254365069866eafcdc519743664075a6d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 29 Oct 2022 22:39:10 +0300 Subject: [PATCH 34/71] add translators to codeowners with their respective translation files --- CODEOWNERS | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git 
a/CODEOWNERS b/CODEOWNERS index 12e87aae..a48d8012 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,3 +1,13 @@ * @AUTOMATIC1111 - -/localizations/ko_KR.json @36DB \ No newline at end of file +/localizations/ar_AR.json @xmodar @blackneoo +/localizations/de_DE.json @LunixWasTaken +/localizations/es_ES.json @innovaciones +/localizations/fr_FR.json @tumbly +/localizations/it_IT.json @EugenioBuffo +/localizations/ja_JP.json @yuuki76 +/localizations/ko_KR.json @36DB +/localizations/pt_BR.json @M-art-ucci +/localizations/ru_RU.json @kabachuha +/localizations/tr_TR.json @camenduru +/localizations/zh_CN.json @dtlnor @bgluminous +/localizations/zh_TW.json @benlisquare From f62db4d5c753bc32d2ae166606ce41f4c5fa5c43 Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 03:55:43 +0800 Subject: [PATCH 35/71] fix progress response model --- modules/api/api.py | 30 ------------------------------ modules/api/models.py | 8 ++++---- 2 files changed, 4 insertions(+), 34 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index e93cddcb..7e8522a2 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,33 +1,3 @@ -# import time - -# from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI -# from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images -# from modules.sd_samplers import all_samplers -# from modules.extras import run_pnginfo -# import modules.shared as shared -# from modules import devices -# import uvicorn -# from fastapi import Body, APIRouter, HTTPException -# from fastapi.responses import JSONResponse -# from pydantic import BaseModel, Field, Json -# from typing import List -# import json -# import io -# import base64 -# from PIL import Image - -# sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None) - -# class TextToImageResponse(BaseModel): -# images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") -# parameters: Json -# info: Json - -# class ImageToImageResponse(BaseModel): -# images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.") -# parameters: Json -# info: Json - import time import uvicorn from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image diff --git a/modules/api/models.py b/modules/api/models.py index 8d4abc39..e1762fb9 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,6 +1,6 @@ import inspect from click import prompt -from pydantic import BaseModel, Field, create_model +from pydantic import BaseModel, Field, Json, create_model from typing import Any, Optional from typing_extensions import Literal from inflection import underscore @@ -158,6 +158,6 @@ class PNGInfoResponse(BaseModel): info: str = Field(title="Image info", description="A string with all the info the image had") class ProgressResponse(BaseModel): - progress: float - eta_relative: float - state: dict + progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") + eta_relative: float = Field(title="ETA in secs") + state: Json From e9c6c2a51f972fd7cd88ea740ade4ac3d8108b67 Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 04:02:56 +0800 Subject: [PATCH 36/71] add description for state field --- modules/api/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/api/models.py 
b/modules/api/models.py index e1762fb9..709ab5a6 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -160,4 +160,4 @@ class PNGInfoResponse(BaseModel): class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") - state: Json + state: Json = Field(title="State", description="The current state snapshot") From 88f46a5bec610cf03641f18becbe3deda541e982 Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 05:04:29 +0800 Subject: [PATCH 37/71] update progress response model --- modules/api/api.py | 6 +++--- modules/api/models.py | 4 ++-- modules/shared.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 7e8522a2..5912d289 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -61,7 +61,7 @@ class Api: self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse) self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse) - self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"]) + self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse) def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): sampler_index = sampler_to_index(txt2imgreq.sampler_index) @@ -171,7 +171,7 @@ class Api: # copy from check_progress_call of ui.py if shared.state.job_count == 0: - return ProgressResponse(progress=0, eta_relative=0, state=shared.state.js()) + return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict()) # avoid dividing zero progress = 0.01 @@ -187,7 +187,7 @@ class Api: progress = min(progress, 1) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.js()) + return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict()) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 709ab5a6..0ab85ec5 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -1,6 +1,6 @@ import inspect from click import prompt -from pydantic import BaseModel, Field, Json, create_model +from pydantic import BaseModel, Field, create_model from typing import Any, Optional from typing_extensions import Literal from inflection import underscore @@ -160,4 +160,4 @@ class PNGInfoResponse(BaseModel): class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") - state: Json = Field(title="State", description="The current state snapshot") + state: dict = Field(title="State", description="The current state snapshot") diff --git a/modules/shared.py b/modules/shared.py index 0f4c035d..f7b0990c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -147,7 +147,7 @@ class State: def get_job_timestamp(self): return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp? 
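# A hedged client-side sketch for the endpoint wired up above (assumes a local
# server launched with --api): poll /sdapi/v1/progress while a job is running.
#
#     import time
#     import requests
#
#     for _ in range(10):
#         resp = requests.get("http://localhost:7860/sdapi/v1/progress").json()
#         print(f"progress: {resp['progress']:.0%}, eta: {resp['eta_relative']:.1f}s")
#         time.sleep(1)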
- def js(self): + def dict(self): obj = { "skipped": self.skipped, "interrupted": self.skipped, @@ -158,7 +158,7 @@ class State: "sampling_steps": self.sampling_steps, } - return json.dumps(obj) + return obj state = State() From 9f104b53c425e248595e5b6481336d2a339e015e Mon Sep 17 00:00:00 2001 From: evshiron Date: Sun, 30 Oct 2022 05:19:17 +0800 Subject: [PATCH 38/71] preview current image when opts.show_progress_every_n_steps is enabled --- modules/api/api.py | 8 ++++++-- modules/api/models.py | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 5912d289..e960bb7b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,7 +1,7 @@ import time import uvicorn from gradio.processing_utils import encode_pil_to_base64, decode_base64_to_file, decode_base64_to_image -from fastapi import APIRouter, HTTPException +from fastapi import APIRouter, Depends, HTTPException import modules.shared as shared from modules import devices from modules.api.models import * @@ -187,7 +187,11 @@ class Api: progress = min(progress, 1) - return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict()) + current_image = None + if shared.state.current_image: + current_image = encode_pil_to_base64(shared.state.current_image) + + return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image) def launch(self, server_name, port): self.app.include_router(self.router) diff --git a/modules/api/models.py b/modules/api/models.py index 0ab85ec5..c8bc719a 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -161,3 +161,4 @@ class ProgressResponse(BaseModel): progress: float = Field(title="Progress", description="The progress with a range of 0 to 1") eta_relative: float = Field(title="ETA in secs") state: dict = Field(title="State", description="The current state snapshot") + current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. 
opts.show_progress_every_n_steps is required for this to work.") From 22a54b058211d7b067dce92a3c1dad8d5849da70 Mon Sep 17 00:00:00 2001 From: Strothis Date: Sat, 29 Oct 2022 23:43:30 +0200 Subject: [PATCH 39/71] Fix German Localization --- localizations/de_DE.json | 525 +++++++++++++++++++++------------------ 1 file changed, 282 insertions(+), 243 deletions(-) diff --git a/localizations/de_DE.json b/localizations/de_DE.json index b52fa681..56d54b54 100644 --- a/localizations/de_DE.json +++ b/localizations/de_DE.json @@ -1,42 +1,41 @@ - { "⤡": "⤡", "⊞": "⊞", "×": "×", "❮": "❮", "❯": "❯", + "view": "API ", + "api": "anzeigen", + "•": " • ", + "built with gradio": "Mit Gradio erstellt", "Loading...": "Lädt...", - "view": "zeigen", - "api": "api", - "•": "•", - "built with gradio": "Erstellt mit Gradio", - "Stable Diffusion checkpoint": "Stable Diffusion checkpoint", + "Stable Diffusion checkpoint": "Stable Diffusion Checkpoint", "txt2img": "txt2img", "img2img": "img2img", "Extras": "Extras", "PNG Info": "PNG Info", - "Checkpoint Merger": "Checkpoint Merger", + "Checkpoint Merger": "Checkpoint Fusion", "Train": "Trainieren", "Settings": "Einstellungen", "Prompt": "Prompt", - "Negative prompt": "Negativer Prompt", + "Negative prompt": "Negative Prompt", "Run": "Ausführen", "Skip": "Überspringen", "Interrupt": "Abbrechen", "Generate": "Generieren", - "Style 1": "Style 1", - "Style 2": "Style 2", + "Style 1": "Stil 1", + "Style 2": "Stil 2", "Label": "Bezeichnung", "File": "Datei", "Drop File Here": "Datei hier ablegen", "-": "-", - "o": "o", - "Click to Upload": "Klicken zum Hochladen", + "o": "oder", + "Click to Upload": "Hochladen", "Image": "Bild", "Check progress": "Fortschitt prüfen", "Check progress (first)": "Fortschritt prüfen (Initial)", - "Sampling Steps": "Sampling Steps", - "Sampling method": "Sampling method", + "Sampling Steps": "Samplingschritte", + "Sampling method": "Samplingmethode", "Euler a": "Euler a", "Euler": "Euler", "LMS": "LMS", @@ -54,37 +53,37 @@ "Height": "Höhe", "Restore faces": "Gesichter wiederherstellen", "Tiling": "Kacheln", - "Highres. fix": "Highres. fix", - "Firstpass width": "Breite erstdurchlauf", - "Firstpass height": "Höhe erstdurchlauf", - "Denoising strength": "Denoising stärke", - "Batch count": "Batch anzahl", - "Batch size": "Batch größe", - "CFG Scale": "CFG Scale", + "Highres. fix": "Highres. 
Fix", + "Firstpass width": "Breite Erstdurchlauf", + "Firstpass height": "Höhe Erstdurchlauf", + "Denoising strength": "Denoisingstärke", + "Batch count": "Batchanzahl", + "Batch size": "Batchgröße", + "CFG Scale": "CFG-Skala", "Seed": "Seed", "Extra": "Extra", - "Variation seed": "Variation seed", - "Variation strength": "Variation strength", - "Resize seed from width": "Seed von der Breite her ändern", - "Resize seed from height": " Seed von der Breite her ändern", - "Script": "Script", + "Variation seed": "Variationsseed", + "Variation strength": "Variationsstärke", + "Resize seed from width": "Seed von Breite ändern", + "Resize seed from height": "Seed von Höhe ändern", + "Script": "Skript", "None": "Nichts", - "Prompt matrix": "Prompt matrix", - "Prompts from file or textbox": "Prompts von Datei oder Textbox", - "X/Y plot": "X/Y graph", + "Prompt matrix": "Promptmatrix", + "Prompts from file or textbox": "Prompts aus Datei oder Textfeld", + "X/Y plot": "X/Y Graf", "Put variable parts at start of prompt": "Variable teile am start des Prompt setzen", - "Show Textbox": "Textbox Anzeigen", - "File with inputs": "Datei mit Inputwerten", - "Prompts": "Prompts", - "X type": "X typ", + "Iterate seed every line": "Iterate seed every line", + "List of prompt inputs": "List of prompt inputs", + "Upload prompt inputs": "Upload prompt inputs", + "X type": "X-Typ", "Nothing": "Nichts", "Var. seed": "Var. seed", "Var. strength": "Var. strength", - "Steps": "Steps", - "Prompt S/R": "Prompt S/R", - "Prompt order": "Prompt order", + "Steps": "Schritte", + "Prompt S/R": "Prompt Suchen/Ersetzen", + "Prompt order": "Promptreihenfolge", "Sampler": "Sampler", - "Checkpoint name": "Checkpoint name", + "Checkpoint name": "Checkpointname", "Hypernetwork": "Hypernetwork", "Hypernet str.": "Hypernet str.", "Sigma Churn": "Sigma Churn", @@ -94,76 +93,78 @@ "Eta": "Eta", "Clip skip": "Clip skip", "Denoising": "Denoising", - "X values": "X values", - "Y type": "Y type", - "Y values": "Y values", + "Cond. Image Mask Weight": "Cond. 
Image Mask Weight", + "X values": "X-Werte", + "Y type": "Y-Typ", + "Y values": "Y-Werte", "Draw legend": "Legende zeichnen", "Include Separate Images": "Seperate Bilder hinzufügen", "Keep -1 for seeds": "-1 als Seed behalten", - "Drop Image Here": "Bild hier ablegen", "Save": "Speichern", - "Send to img2img": "Senden an img2img", - "Send to inpaint": "Senden an inpaint", - "Send to extras": "Senden an extras", - "Make Zip when Save?": "Zip beim speichern erstellen?", - "Textbox": "Textbox", - "Interrogate\nCLIP": "CLIP\nAbfragen", + "Send to img2img": "An img2img senden", + "Send to inpaint": "An Inpaint senden", + "Send to extras": "An Extras senden", + "Make Zip when Save?": "Zip beim Speichern erstellen?", + "Textbox": "Textfeld", + "Interrogate\nCLIP": "Interrogate\nCLIP", + "Interrogate\nDeepBooru": "Interrogate\nDeepBooru", "Inpaint": "Inpaint", "Batch img2img": "Batch img2img", "Image for img2img": "Bild für img2img", - "Image for inpainting with mask": "Bild für inpainting mit maske", + "Drop Image Here": "Bild hier ablegen", + "Image for inpainting with mask": "Bild für inpainting mit Maske", "Mask": "Maske", - "Mask blur": "Masken Unschärfe", - "Mask mode": "Masken modus", + "Mask blur": "Maskenunschärfe", + "Mask mode": "Maskenmodus", "Draw mask": "Maske zeichnen", - "Upload mask": "Maske Hochladen", - "Masking mode": "Modus zum Maskieren", - "Inpaint masked": "Inpaint maskiertes", - "Inpaint not masked": "Inpaint nicht maskiertes", + "Upload mask": "Maske hochladen", + "Masking mode": "Maskierungsmodus", + "Inpaint masked": "Maskiertes inpainten", + "Inpaint not masked": "Nicht maskiertes inpainten", "Masked content": "Maskierter Inhalt", "fill": "ausfüllen", "original": "original", "latent noise": "latent noise", "latent nothing": "latent nothing", "Inpaint at full resolution": "Inpaint mit voller Auflösung", - "Inpaint at full resolution padding, pixels": "Inpaint mit voller Auflösung Abstand, Pixels", + "Inpaint at full resolution padding, pixels": "Inpaint bei voller Auflösung Abstand, Pixel", "Process images in a directory on the same machine where the server is running.": "Bilder in einem Verzeichnis auf demselben Rechner verarbeiten, auf dem der Server läuft.", "Use an empty output directory to save pictures normally instead of writing to the output directory.": "Ein leeres Ausgabeverzeichnis verwenden, um Bilder normal zu speichern, anstatt in das Ausgabeverzeichnis zu schreiben.", - "Input directory": "Input Verzeichnis", - "Output directory": "Output verzeichnis", - "Resize mode": "Größe anpassen - Modus", - "Just resize": "Nur größe anpassen", - "Crop and resize": "Zuschneiden und größe anpassen", + "Input directory": "Eingabeverzeichnis", + "Output directory": "Ausgabeverzeichnis", + "Resize mode": "Größenänderungsmodus", + "Just resize": "Nur Größe anpassen", + "Crop and resize": "Zuschneiden und Größe anpassen", "Resize and fill": "Größe anpassen und ausfüllen", - "img2img alternative test": "img2img alternative test", + "img2img alternative test": "img2img alternativer Test", "Loopback": "Loopback", "Outpainting mk2": "Outpainting mk2", "Poor man's outpainting": "Poor man's outpainting", - "SD upscale": "SD upscale", - "should be 2 or lower.": "sollte 2 oder niedriger sein.", - "Override `Sampling method` to Euler?(this method is built for it)": "`Sampling method` a Euler überschreiben? 
(diese methode is dafür ausgelegt)", - "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "`prompt` zum gleichen wert wie `Originaler prompt` überschreiben? (und `Negativer prompt`)", - "Original prompt": "Originaler Prompt", - "Original negative prompt": "Originaler negativer prompt", - "Override `Sampling Steps` to the same value as `Decode steps`?": "`Sampling Steps` zum gleichen wert wie `Decode steps` überschreiben?", - "Decode steps": "Decode steps", - "Override `Denoising strength` to 1?": "`Denoising strength` zu 1 überschreiben?", - "Decode CFG scale": "Decode CFG scale", + "SD upscale": "SD-Upscale", + "should be 2 or lower.": "Sollte 2 oder niedriger sein.", + "Override `Sampling method` to Euler?(this method is built for it)": "`Samplingmethode` auf Euler setzen? (Diese Methode is dafür ausgelegt)", + "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "`Prompt` auf denselben Wert wie `Originale Prompt` (und `Negative Prompt`) setzen?", + "Original prompt": "Originale Prompt", + "Original negative prompt": "Originale negative Prompt", + "Override `Sampling Steps` to the same value as `Decode steps`?": "`Samplingschritte` auf denselben Wert wie `Dekodierschritte` setzen?", + "Decode steps": "Dekodierschritte", + "Override `Denoising strength` to 1?": "`Denoisingstärke auf 1 setzen?", + "Decode CFG scale": "CFG-Skala dekodieren", "Randomness": "Zufälligkeit", - "Sigma adjustment for finding noise for image": "Sigma anpassungen um noise für Bilder zu finden.", - "Loops": "Loops", + "Sigma adjustment for finding noise for image": "Sigma-Anpassung für die Suche nach Noise des Bildes", + "Loops": "Schleifen", "Denoising strength change factor": "Denoising strength change factor", - "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Empfohlene Einstellungen: Sampling Schritte: 80-100, Sampler Methode: Euler a, Denoising stärke: 0.8", - "Pixels to expand": "Anz. 
Pixel zum erweitern", + "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Empfohlene Einstellungen: Samplingschritte: 80-100, Samplermethode: Euler a, Denoisingstärke: 0.8", + "Pixels to expand": "Pixel zum Erweitern", "Outpainting direction": "Outpainting Richtung", "left": "Links", "right": "Rechts", "up": "Hoch", "down": "Runter", - "Fall-off exponent (lower=higher detail)": "Fall-off exponent (weniger=mehr details)", - "Color variation": "Farb variationen", - "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Skaliert das Bild auf die doppelte Größe; Benutz die Schieberegler für Breite und Höhe, um die Kachelgröße einzustellen", - "Tile overlap": "Kacheln überlappungen", + "Fall-off exponent (lower=higher detail)": "Abfallexponent (niedriger=mehr Details)", + "Color variation": "Farbabweichung", + "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Skaliert das Bild auf die doppelte Größe; Benutze die Schieberegler für Breite und Höhe, um die Kachelgröße einzustellen", + "Tile overlap": "Kachelüberlappung", "Upscaler": "Upscaler", "Lanczos": "Lanczos", "LDSR": "LDSR", @@ -171,249 +172,287 @@ "ScuNET GAN": "ScuNET GAN", "ScuNET PSNR": "ScuNET PSNR", "ESRGAN_4x": "ESRGAN_4x", - "Single Image": "Ein Bild", - "Batch Process": "Massenverarbeitung", - "Batch from Directory": "Massenverarbeitung vom Verzeichnis", + "Single Image": "Einzelnes Bild", + "Batch Process": "Batchverarbeitung", + "Batch from Directory": "Batchverarbeitung aus Verzeichnis", "Source": "Quelle", "Show result images": "Bildergebnisse zeigen", - "Scale by": "Skalieren von", + "Scale by": "Skalieren um", "Scale to": "Skalieren zu", "Resize": "Größe anpassen", "Crop to fit": "Zuschneiden damit es passt", - "Upscaler 2 visibility": "Upscaler 2 sichtbarkeit", - "GFPGAN visibility": "GFPGAN sichtbarkeit", - "CodeFormer visibility": "CodeFormer sichtbarkeit", - "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer gewicht (0 = maximaler effekt, 1 = minimalster effekt)", - "Open output directory": "Zielverzeichnis öffnen", - "Send to txt2img": "Senden an txt2img", + "Upscaler 2 visibility": "Upscaler 2 Sichtbarkeit", + "GFPGAN visibility": "GFPGAN Sichtbarkeit", + "CodeFormer visibility": "CodeFormer Sichtbarkeit", + "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer Gewichtung (0 = maximale Wirkung, 1 = minimale Wirkung)", + "Upscale Before Restoring Faces": "Upscale Before Restoring Faces", + "Send to txt2img": "An txt2img senden", "A merger of the two checkpoints will be generated in your": "Die zusammgeführten Checkpoints werden gespeichert unter", - "checkpoint": "checkpoint", + "checkpoint": "Checkpoint", "directory.": "Verzeichnis.", "Primary model (A)": "Primäres Modell (A)", "Secondary model (B)": "Sekundäres Modell (B)", "Tertiary model (C)": "Tertiäres Modell (C)", "Custom Name (Optional)": "Eigener Name (Optional)", - "Multiplier (M) - set to 0 to get model A": "Multiplier (M) - auf 0 setzen um Modell A zu bekommen", + "Multiplier (M) - set to 0 to get model A": "Multiplikator (M) - auf 0 setzen, um Modell A zu erhalten", "Interpolation Method": "Interpolationsmethode", "Weighted sum": "Weighted sum", "Add difference": "Add difference", "Save as float16": "Speichern als float16", "See": "Siehe ", - "wiki": "wiki ", - "for detailed explanation.": "für eine detailierte erklärung.", + "wiki": "Wiki ", + "for detailed explanation.": "für eine 
ausführliche Erklärung.", "Create embedding": "Embedding erstellen", "Create hypernetwork": "Hypernetwork erstellen", "Preprocess images": "Bilder vorbereiten", "Name": "Name", "Initialization text": "Initialisierungstext", - "Number of vectors per token": "Anzahl der vektoren pro token", + "Number of vectors per token": "Anzahl der Vektoren pro Token", "Overwrite Old Embedding": "Alte Embeddings überschreiben", "Modules": "Module", - "Enter hypernetwork layer structure": "Hypernetwork-Schichtstruktur angeben", + "Enter hypernetwork layer structure": "Hypernetwork-Ebenenstruktur angeben", "Select activation function of hypernetwork": "Aktivierungsfunktion des Hypernetwork auswählen", "linear": "linear", "relu": "relu", "leakyrelu": "leakyrelu", "elu": "elu", "swish": "swish", - "Add layer normalization": "Schicht normalisierung hinzufügen", + "tanh": "tanh", + "sigmoid": "sigmoid", + "celu": "celu", + "gelu": "gelu", + "glu": "glu", + "hardshrink": "hardshrink", + "hardsigmoid": "hardsigmoid", + "hardtanh": "hardtanh", + "logsigmoid": "logsigmoid", + "logsoftmax": "logsoftmax", + "mish": "mish", + "prelu": "prelu", + "rrelu": "rrelu", + "relu6": "relu6", + "selu": "selu", + "silu": "silu", + "softmax": "softmax", + "softmax2d": "softmax2d", + "softmin": "softmin", + "softplus": "softplus", + "softshrink": "softshrink", + "softsign": "softsign", + "tanhshrink": "tanhshrink", + "threshold": "threshold", + "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Auswahl der Initialisierung der Ebenengewichte. Empfohlen wird relu-like - Kaiming, sigmoid-like - Xavier", + "Normal": "Normal", + "KaimingUniform": "KaimingUniform", + "KaimingNormal": "KaimingNormal", + "XavierUniform": "XavierUniform", + "XavierNormal": "XavierNormal", + "Add layer normalization": "Ebenennormalisierung hinzufügen", "Use dropout": "Dropout benutzen", "Overwrite Old Hypernetwork": "Altes Hypernetwork überschreiben", "Source directory": "Quellenverzeichnis", "Destination directory": "Zielverzeichnis", - "Existing Caption txt Action": "Vorhandene Beschriftung der txt Aktion", + "Existing Caption txt Action": "Vorhandene Beschriftung der txt", "ignore": "ignorieren", "copy": "kopieren", - "prepend": "vorangestellt", + "prepend": "voranstellen", "append": "anhängen", "Create flipped copies": "Gespiegelte Bilder erstellen", "Split oversized images": "Übergroße Bilder aufteilen", - "Use BLIP for caption": "BLIP für Überschrift nutzen", - "Use deepbooru for caption": "Deepbooru für Überschrift nutzen", - "Split image threshold": "Bilder aufteilen Grenzwert", - "Split image overlap ratio": "Überschneidungsverhältnis beim Bilder aufteilen", + "Auto focal point crop": "Automatisch auf Fokuspunkt zuschneiden", + "Use BLIP for caption": "BLIP für Beschriftung nutzen", + "Use deepbooru for caption": "Deepbooru für Beschriftung nutzen", + "Split image threshold": "Schwellenwert für die Aufteilung von Bildern", + "Split image overlap ratio": "Überschneidungsverhältnis der Teilbilder", + "Focal point face weight": "Fokuspunkt Gesicht Gewicht", + "Focal point entropy weight": "Fokuspunkt Entropie Gewicht", + "Focal point edges weight": "Fokuspunkt Kanten Gewicht", + "Create debug image": "Testbild erstellen", "Preprocess": "Vorbereiten", - "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Trainieren eines Embeddings oder ein Hypernetwork; Sie müssen ein Verzeichnis mit einem Satz von Bildern im Verhältnis 1:1 angeben", - "[wiki]": "[wiki]", + 
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Trainieren eines Embeddings oder eines Hypernetworks; Sie müssen ein Verzeichnis mit einem Satz von Bildern im Verhältnis 1:1 angeben", + "[wiki]": "[Wiki]", "Embedding": "Embedding", - "Embedding Learning rate": "Embedding Learning rate", - "Hypernetwork Learning rate": "Hypernetwork Learning rate", - "Dataset directory": "Dataset verzeichnis", - "Log directory": "Log verzeichnis", - "Prompt template file": "Vorlage Datei für Prompt", - "Max steps": "Max steps", - "Save an image to log directory every N steps, 0 to disable": "Alle N steps, ein Bild im Log Verzeichnis speichern, 0 zum deaktivieren", - "Save a copy of embedding to log directory every N steps, 0 to disable": "Alle N steps, eine Kopie des Embedding im Log Verzeichnis speichern, 0 zum deaktivieren", - "Save images with embedding in PNG chunks": "Das Bild mit embedding in PNG Fragmente speichern", - "Read parameters (prompt, etc...) from txt2img tab when making previews": "Bei vorschau die Parameter (prompt, etc...) vom txt2img tab lesen", + "Embedding Learning rate": "Embedding Lernrate", + "Hypernetwork Learning rate": "Hypernetwork Lernrate", + "Dataset directory": "Datensatzverzeichnis", + "Log directory": "Protokollverzeichnis", + "Prompt template file": "Prompt-Vorlagendatei", + "Max steps": "Max Schritte", + "Save an image to log directory every N steps, 0 to disable": "Speichere alle N Schritte ein Bild im Protokollverzeichnis, 0 zum Deaktivieren", + "Save a copy of embedding to log directory every N steps, 0 to disable": "Speichere alle N Schritte eine Embeddingkopie im Protokollverzeichnis, 0 zum Deaktivieren", + "Save images with embedding in PNG chunks": "Speichere Bilder mit Embeddings in PNG Chunks", + "Read parameters (prompt, etc...) from txt2img tab when making previews": "Lese Parameter (Prompt, etc...) 
aus dem txt2img-Tab beim Erstellen von Vorschaubildern.", "Train Hypernetwork": "Hypernetwork Trainieren", "Train Embedding": "Embedding Trainieren", - "Apply settings": "Einstellungen anwenden", + "Apply settings": "Eintellungen anwenden", "Saving images/grids": "Bilder/Raster speichern", - "Always save all generated images": "Grundsätzlich alle generieren Bilder speichern", + "Always save all generated images": "Immer alle generierten Bilder speichern", "File format for images": "Dateiformat für Bilder", - "Images filename pattern": "Dateinamen vorlage für Bilder", - "Add number to filename when saving": "Beim speichern, dem Dateinamen die Nummer anhängen", - "Always save all generated image grids": "Grundsätzlich alle generieren Raster speichern", + "Images filename pattern": "Dateinamensmuster für Bilder", + "Add number to filename when saving": "Beim speichern, dem Dateinamen Nummer anhängen", + "Always save all generated image grids": "Immer alle generierten Bildraster speichern", "File format for grids": "Dateiformat für Raster", - "Add extended info (seed, prompt) to filename when saving grid": "Speichern von Raster, erweiterte infos (seed, prompt) dem Dateinamen anhängen", - "Do not save grids consisting of one picture": "Raster, die nur aus einem Bild bestehen, nicht speichern", - "Prevent empty spots in grid (when set to autodetect)": "Lücken im Raster verhindern (falls aus autodetect gesetzt)", - "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Raster reihen anzahl; -1 für Autodetect und 0 für die anzahl wie batch size", - "Save text information about generation parameters as chunks to png files": "Information zu Generierungsparameter als Fragmente in PNG dateien speichern", - "Create a text file next to every image with generation parameters.": "Textdatei mit Generierungsparameter seperat zu Bilddatei speichern", - "Save a copy of image before doing face restoration.": "Kope des Bildes vor und nach Gesichtswiederhestellung speichern", - "Quality for saved jpeg images": "Qualität der als JPEG gespeicherten Bilder", - "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Wenn PNG Bild größer als 4MB oder Dimensionen Größer als 4000, herunterskalieren und als JPG speichern.", - "Use original name for output filename during batch process in extras tab": "Originaler name als Output Dateinamen benutzen während Massenverarbeitung in Extras tab.", - "When using 'Save' button, only save a single selected image": "Beim benutzen von 'Speichern', nur das gewählte Bild speichern.", + "Add extended info (seed, prompt) to filename when saving grid": "Beim Speichern von Rastern zusätzliche Information (Seed, Prompt) hinzufügen", + "Do not save grids consisting of one picture": "Keine Raster speichern, die nur aus einem Bild bestehen", + "Prevent empty spots in grid (when set to autodetect)": "Lücken im Raster verhindern (falls auf Auto-Erkennung gesetzt)", + "Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Rasterreihenanzahl; -1 für Auto-Erkennung und 0 für die gleiche wie die Batchanzahl", + "Save text information about generation parameters as chunks to png files": "Generationsparameter als Chunks in PNG-Dateien speichern", + "Create a text file next to every image with generation parameters.": "Erstelle zu jedem Bild eine Textdatei, die die Generationsparameter enthält", + "Save a copy of image before doing face restoration.": "Vor der Gesichtswiederhestellung eine Kopie des 
Bildes speichern", + "Quality for saved jpeg images": "Qualität der JPEG-Bilder", + "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Wenn ein PNG-Bild größer als 4MB oder die Dimensionen größer als 4000 ist, herunterskalieren und als JPG speichern.", + "Use original name for output filename during batch process in extras tab": "Orginale Dateinamen als Ausgabenamen bei der Batchverarbeitung im Extras-Tab verwenden", + "When using 'Save' button, only save a single selected image": "Bei der Benutzung des 'Speichern'-Knopfes, nur das ausgewählte Bild speichern", "Do not add watermark to images": "Den Bildern kein Wasserzeichen hinzufügen", - "Paths for saving": "Pfade zum speichern", - "Output directory for images; if empty, defaults to three directories below": "Ausgabeverzeichnis für Bilder: wenn leer fällt zurück auf drei Verzeichnisse unterhalb", - "Output directory for txt2img images": "Ausgabeverzeichnis für txt2img", - "Output directory for img2img images": "Ausgabeverzeichnis für img2img", - "Output directory for images from extras tab": "Ausgabeverzeichnis für extras", - "Output directory for grids; if empty, defaults to two directories below": "Ausgabeverzeichnis für Raster; wenn leer fällt zurück auf zwei Verzeichnisse unterhalb.", - "Output directory for txt2img grids": "Ausgabeverzeichnis für txt2img Raster", + "Paths for saving": "Pfade zum Speichern", + "Output directory for images; if empty, defaults to three directories below": "Ausgabeverzeichnis für Bilder; Falls leer, werden die Pfade unterhalb verwendet", + "Output directory for txt2img images": "Ausgabeverzeichnis für txt2img Bilder", + "Output directory for img2img images": "Ausgabeverzeichnis für img2img Bilder", + "Output directory for images from extras tab": "Ausgabeverzeichnis für Extras-Tab Bilder", + "Output directory for grids; if empty, defaults to two directories below": "Ausgabeverzeichnis für Raster; Falls leer, werden die Pfade unterhalb verwendet", + "Output directory for txt2img grids": "Ausgabeverzeichnis für txt2img Raster", "Output directory for img2img grids": "Ausgabeverzeichnis für img2img Raster", - "Directory for saving images using the Save button": "Ausgabeverzeichnis Bilder für 'Speichern' Button", + "Directory for saving images using the Save button": "Ausgabeverzeichnis für Bilder, die mit dem 'Speichern'-Knopf gespeichert wurden", "Saving to a directory": "Speichern in ein Verzeichnis", "Save images to a subdirectory": "Bilder in ein Unterverzeichnis speichern", "Save grids to a subdirectory": "Raster in ein Unterverzeichnis speichern", - "When using \"Save\" button, save images to a subdirectory": "Beim benutzen von 'Save' Button, Bilder in Unterverzeichnis speichern", - "Directory name pattern": "Verzeichnisname pattern", - "Max prompt words for [prompt_words] pattern": "Maximale anzahl der Wörter für [prompt_words] pattern", - "Upscaling": "Hochskalieren", - "Tile size for ESRGAN upscalers. 0 = no tiling.": "Kachelgröße für ESRGAN upscalers. 0 = keine Kacheln.", - "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Kacheln überlappungen, größe in pixel für ESRGAN upscalers. 
Niedrige Zahl = Sichtbare übergänge.", + "When using \"Save\" button, save images to a subdirectory": "Bilder bei der Benutzung des 'Speichern'-Knopfes in ein Unterverzeichnis speichern", + "Directory name pattern": "Muster für Verzeichnisnamen", + "Max prompt words for [prompt_words] pattern": "Maximale Wortanzahl für [prompt_words] Muster", + "Upscaling": "Upscaling", + "Tile size for ESRGAN upscalers. 0 = no tiling.": "Kachelgröße für ESRGAN-Upscaler. 0 = keine Kacheln.", + "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Kachelüberlappung in Pixeln für ESRGAN-Upscaler. Niedrige Werte = sichtbare Naht.", "Tile size for all SwinIR.": "Kachelgröße für alle SwinIR.", - "Tile overlap, in pixels for SwinIR. Low values = visible seam.": "Kacheln überlappungen, größe in pixel für SwinIR. Niedrige Zahl = Sichtbare übergänge.", - "LDSR processing steps. Lower = faster": "LDSR verarbeitungsschritte. niedriger = schneller", + "Tile overlap, in pixels for SwinIR. Low values = visible seam.": "Kachelüberlappung in Pixeln für SwinIR. Niedrige Werte = sichtbare Naht.", + "LDSR processing steps. Lower = faster": "LDSR-Verarbeitungsschritte. Niedriger = schneller", "Upscaler for img2img": "Upscaler für img2img", - "Upscale latent space image when doing hires. fix": "Upscale latent space beim ausführen von hires. fix", + "Upscale latent space image when doing hires. fix": "Bild des Latent Space upscalen, wenn Highres. Fix benutzt wird", "Face restoration": "Gesichtswiederhestellung", - "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer gewicht parameter; 0 = maximaler effekt; 1 = minimalster effekt", - "Move face restoration model from VRAM into RAM after processing": "Gesichtswiederhestellungs-Modell nach Verarbeitung vom VRAM ins RAM verschieben", + "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer Gewichtung (0 = maximale Wirkung, 1 = minimale Wirkung)", + "Move face restoration model from VRAM into RAM after processing": "Verschiebe Gesichtswiederhestellung-Modell nach der Verarbeitung vom VRAM in den RAM", "System": "System", - "VRAM usage polls per second during generation. Set to 0 to disable.": "Abfragen pro Sekunde zur VRAM auslastung. 0 = deaktiviert", - "Always print all generation info to standard output": "Grundsätzlich alle Generierungsparameter im standard output ausgeben", - "Add a second progress bar to the console that shows progress for an entire job.": "Zweite Fortschrittsleiste in der Konsole hinzufügen, die den Fortschitt für den ganzen Job anzeigt.", + "VRAM usage polls per second during generation. Set to 0 to disable.": "VRAM-Nutzungsabfragen pro Sekunde während der Generierung. Zum Deaktivieren auf 0 setzen.", + "Always print all generation info to standard output": "Immer alle Generationsinformationen in der Standardausgabe ausgeben", + "Add a second progress bar to the console that shows progress for an entire job.": "Der Konsole einen zweiten Fortschrittsbalken hinzufügen, der den Fortschritt eines gesamten Auftrags anzeigt.", "Training": "Training", - "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "VAE und CLIP während Hypernetwork-Training in RAM verschieben. Spart VRAM.", + "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "VAE und CLIP während des Hypernetwork-Trainings in den RAM verschieben. 
Spart VRAM.", "Filename word regex": "Filename word regex", "Filename join string": "Filename join string", - "Number of repeats for a single input image per epoch; used only for displaying epoch number": "Anzahl der wiederholungen für ein einziges Input-Bild pro epoche; nur dazu genutzt um epochen nummer anzuzeigen", - "Save an csv containing the loss to log directory every N steps, 0 to disable": "CSV die den 'loss' alle N steps beinhaltet. 0 = deaktiviert", + "Number of repeats for a single input image per epoch; used only for displaying epoch number": "Anzahl der Wiederholungen für ein einzelnes Eingabebild pro Epoche; wird nur für die Anzeige der Epochennummer verwendet", + "Save an csv containing the loss to log directory every N steps, 0 to disable": "Speichere eine csv-Datei, die den Verlust enthält, im Protokollverzeichnis alle N Schritte, 0 zum Deaktivieren", "Stable Diffusion": "Stable Diffusion", - "Checkpoints to cache in RAM": "Anz. Checkpoints im RAM zu Cachen", - "Hypernetwork strength": "Hypernetwork stärke", - "Apply color correction to img2img results to match original colors.": "Farbkorrigierungen auf img2img anwenden um den Original zu gleichen.", - "Save a copy of image before applying color correction to img2img results": "Eine Kopie vom Bild vor der anwendung der img2img Farbkorrigierungen speichern.", - "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Mit img2img, führe die exakte anzahl an steps aus, die im Slider angegeben sind. (Im Normalfall würdest du mit Denoising weniger nehmen)", - "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "ermöglicht die Quantisierung in K-Samplern für schärfere und sauberere Ergebnisse. Dadurch können sich existierende Seeds verändern. Für die Anwendung ist ein Neustart erforderlich.", - "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Hervorhebung: Ermöglicht (text) damit das Modell dem Text (mehr) oder [weniger] gewichtung gibt.", - "Use old emphasis implementation. Can be useful to reproduce old seeds.": "Verwendet die alte Implementierung von Hervorhebungen. 
Kann für das benutzen von älteren Seeds nützlich sein.", - "Make K-diffusion samplers produce same images in a batch as when making a single image": "Sorgt dafür, dass K-Diffusions-Sampler in einem Batch die gleichen Bilder erzeugen wie bei der Erstellung eines einzelnen Bildes.", - "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "Erhöhung der Kohärenz durch Auffüllen ab dem letzten Komma innerhalb von N Token, wenn mehr als 75 Token verwendet werden", - "Filter NSFW content": "Filter NSFW inhalte", - "Stop At last layers of CLIP model": "Stop bei den letzten Schichten des CLIP-Modells", - "Interrogate Options": "Optionen abfragen", - "Interrogate: keep models in VRAM": "Abfragen: Modelle im VRAM behalten", - "Interrogate: use artists from artists.csv": "Abfragen: Künstler aus artists.csv verwenden", - "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Abfragen: Rangfolge der Modell-Tags in den Ergebnissen berücksichtigen (hat keine Auswirkung auf beschriftungsbasierte Abfragesysteme).", - "Interrogate: num_beams for BLIP": "Abfragen: num_beams für BLIP", - "Interrogate: minimum description length (excluding artists, etc..)": "Abfrage: Mindestlänge der Beschreibung (ohne Künstler usw.)", - "Interrogate: maximum description length": "Abfragen: maximale Länge der Beschreibung", - "CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: maximale Anzahl von Zeilen in der Textdatei (0 = keine Begrenzung)", - "Interrogate: deepbooru score threshold": "Abfrage: deepbooru schwelle", - "Interrogate: deepbooru sort alphabetically": "Abfrage: deepbooru alphabetisch sortieren", - "use spaces for tags in deepbooru": "Leerzeichen für Tags in deepbooru verwenden", - "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "Escape-Klammern (\\) in deepbooru (damit sie als wörtliche Klammern und nicht zur Hervorhebung verwendet werden)", - "User interface": "Benutzerinterface", - "Show progressbar": "Fortschrittsleiste Anzeigen", - "Show image creation progress every N sampling steps. Set 0 to disable.": "Zeigt den Fortschritt der Bilderstellung alle N Schritte an. 
0 setzen, um zu deaktivieren.", - "Show previews of all images generated in a batch as a grid": "Vorschaubilder aller in einem Batch erzeugten Bilder als Raster anzeigen", - "Show grid in results for web": "Raster in den Ergebnissen für Web anzeigen", - "Do not show any images in results for web": "Keine Bilder in den Ergebnissen für das Web anzeigen", - "Add model hash to generation information": "Modell-Hash zu Generierungsinformationen hinzufügen", - "Add model name to generation information": "Modell-Name zu Generierungsinformationen hinzufügen", - "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Beim Einlesen von Generierungsparametern aus Text in die Benutzeroberfläche (aus PNG-Informationen oder eingefügtem Text) wird das ausgewählte Modell/der ausgewählte Checkpoint nicht geändert.", - "Font for image grids that have text": "Schriftart für Bildraster, die Text enthalten", - "Enable full page image viewer": "Ganzseitenbildbetrachter einschalten", - "Show images zoomed in by default in full page image viewer": "Bilder standardmäßig vergrößert im Ganzseitenbildbetrachter anzeigen", - "Show generation progress in window title.": "Generierungsfortschritt im Fenstertitel anzeigen.", - "Quicksettings list": "Quicksettings-Liste", - "Localization (requires restart)": "Lokalisierung (erfordert Neustart)", - "Sampler parameters": "Sampler parameter", - "Hide samplers in user interface (requires restart)": "Ausblenden von Samplern in der Benutzeroberfläche (erfordert einen Neustart)", - "eta (noise multiplier) for DDIM": "eta (noise multiplier) für DDIM", - "eta (noise multiplier) for ancestral samplers": "eta (noise multiplier) für ancestral samplers", - "img2img DDIM discretize": "img2img DDIM discretize", + "Checkpoints to cache in RAM": "Checkpoints zum Zwischenspeichern im RAM", + "Hypernetwork strength": "Hypernetworkstärke", + "Inpainting conditioning mask strength": "Inpainting Stärke der Konditionierungsmaske", + "Apply color correction to img2img results to match original colors.": "Farbkorrektur auf die img2img-Ergebnisse anwenden, damit sie den Originalfarben entsprechen.", + "Save a copy of image before applying color correction to img2img results": "Vor dem Anwenden der Farbkorrektur eine Kopie des Bildes speichern", + "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Mit img2img, die exakte Anzahl der Schritte ausführen, die vom Schieberegler angegeben sind (normalerweise weniger bei weniger Denoising).", + "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Aktivieren der Quantisierung in K-Samplern für schärfere und sauberere Ergebnisse. Dies kann bestehende Seeds verändern. Erfordert Neustart.", + "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Hervorhebung: Verwenden Sie (Text), damit das Modell dem Text mehr Aufmerksamkeit schenkt, und [Text], damit es ihm weniger Aufmerksamkeit schenkt", + "Use old emphasis implementation. Can be useful to reproduce old seeds.": "Verwenden der alten Implementierung von Hervorhebungen. 
Kann nützlich sein, um alte Seeds zu reproduzieren.", + "Make K-diffusion samplers produce same images in a batch as when making a single image": "K-Diffusions-Sampler erzeugen in einem Batch die gleichen Bilder, wie bei der Erstellung eines einzelnen Bildes", + "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "Erhöhung der Kohärenz durch Auffüllen ab dem letzten Komma innerhalb von n Token, wenn mehr als 75 Token verwendet werden", + "Filter NSFW content": "NSFW-Inhalte filtern", + "Stop At last layers of CLIP model": "Stoppe bei den letzten Schichten des CLIP-Modells", + "Interrogate Options": "Interrogate Optionen", + "Interrogate: keep models in VRAM": "Interrogate: Modelle im VRAM behalten", + "Interrogate: use artists from artists.csv": "Interrogate: Künstler aus 'artists.csv' nutzen", + "Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Interrogate: Die Rangfolge von Modell-Tags in den Ergebnissen einschließen (hat keine Auswirkung auf beschriftungsbasierte Interrogator).", + "Interrogate: num_beams for BLIP": "Interrogate: num_beams für BLIP", + "Interrogate: minimum description length (excluding artists, etc..)": "Interrogate: minimale Beschreibungslänge (Künstler, etc.. ausgenommen)", + "Interrogate: maximum description length": "Interrogate: maximale Beschreibungslänge", + "CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: maximale Anzahl an Zeilen in Textdatei (0 = Kein Limit)", + "Interrogate: deepbooru score threshold": "Interrogate: Deepbooru minimale Punkteanzahl", + "Interrogate: deepbooru sort alphabetically": "Interrogate: Sortiere Deepbooru alphabetisch", + "use spaces for tags in deepbooru": "Benutze Leerzeichen für Deepbooru-Tags", + "escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "Escape-Klammern (\\) in Deepbooru (damit sie als normale Klammern und nicht zur Hervorhebung verwendet werden)", + "User interface": "Benutzeroberfläche", + "Show progressbar": "Fortschrittsleiste anzeigen", + "Show image creation progress every N sampling steps. Set 0 to disable.": "Zeige eine Bildvorschau alle N Samplingschritte. 
Zum Deaktivieren auf 0 setzen.", + "Show previews of all images generated in a batch as a grid": "Zeige eine Vorschau aller erzeugten Bilder in einem Batch als Raster", + "Show grid in results for web": "Zeige Raster in der Web-UI Vorschau", + "Do not show any images in results for web": "Keine Bilder in der Web-UI Vorschau zeigen", + "Add model hash to generation information": "Hash des Modells zu den Generationsinformationen hinzufügen", + "Add model name to generation information": "Name des Modells zu den Generationsinformationen hinzufügen", + "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Beim Einlesen von Generierungsparametern aus Text in die Benutzeroberfläche (aus PNG-Info oder eingefügtem Text) wird das ausgewählte Modell/Checkpoint nicht geändert.", + "Send seed when sending prompt or image to other interface": "Den Seed, beim Senden des Bildes/Prompt zu einem anderen Tab, mitsenden", + "Font for image grids that have text": "Schriftart für Bildraster mit Text", + "Enable full page image viewer": "Ganzseitenbildbetrachter aktivieren", + "Show images zoomed in by default in full page image viewer": "Standardmäßig Bilder im Ganzseitenbildbetrachter vergrößert anzeigen", + "Show generation progress in window title.": "Generationsfortschritt im Fenstertitel anzeigen.", + "Quicksettings list": "Schnellzugriffsleiste", + "Localization (requires restart)": "Lokalisierung (Erfordert Neustart)", + "Sampler parameters": "Samplerparameter", + "Hide samplers in user interface (requires restart)": "Sampler in der Benutzeroberfläche verstecken (Erfordert Neustart)", + "eta (noise multiplier) for DDIM": "Eta (noise Multiplikator) für DDIM", + "eta (noise multiplier) for ancestral samplers": "Eta (noise Multiplikator) für Ancestral Sampler", + "img2img DDIM discretize": "img2img DDIM diskretisieren", "uniform": "uniform", "quad": "quad", "sigma churn": "sigma churn", "sigma tmin": "sigma tmin", "sigma noise": "sigma noise", "Eta noise seed delta": "Eta noise seed delta", - "Request browser notifications": "Browser-Benachrichtigungen anfordern", + "Request browser notifications": "Browserbenachrichtigungen anfordern", "Download localization template": "Vorlage für Lokalisierung herunterladen", - "Reload custom script bodies (No ui updates, No restart)": "Neu laden von benutzerdefinierten Skripten (keine Aktualisierung der Benutzeroberfläche, kein Neustart)", + "Reload custom script bodies (No ui updates, No restart)": "Benutzerdefinierte Skripte neu laden (keine Aktualisierung der Benutzeroberfläche, kein Neustart)", "Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Gradio neu starten und Komponenten aktualisieren (nur Custom Scripts, ui.py, js und css)", "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (zum Erzeugen Strg+Eingabe oder Alt+Eingabe drücken)", - "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt negativo (presiona Ctrl+Enter o Alt+Enter para generar)", - "Add a random artist to the prompt.": "Zufälligen Künstler den Prompt hinzufügen.", - "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Import von Generierungsparameter aus dem Prompt oder aus der letzten Generierung, wenn das Prompt in der Benutzeroberfläche leer ist.", - "Save style": "Style speichern", - "Apply selected styles to current prompt": "Momentan ausgewählte styles auf den Prompt anwenden", - "Stop processing current 
image and continue processing.": "Verarbeitung des momentanen Bildes abbrechen und zum nächsten fortsetzen.", - "Stop processing images and return any results accumulated so far.": "Verarbeitung abbrechen und alle bisherigen Bilder ausgeben.", - "Style to apply; styles have components for both positive and negative prompts and apply to both": "Style, der angwendet werden soll. Styles haben sowohl negative als auch positive prompt anteile, die auf beide angewendet werden.", + "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Negative Prompt (zum Erzeugen Strg+Eingabe oder Alt+Eingabe drücken)", + "Add a random artist to the prompt.": "Zufälligen Künstler der Prompt hinzufügen.", + "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Lesen der Generationsparameter aus der Prompt oder der letzten Generation (wenn Prompt leer ist) in die Benutzeroberfläche.", + "Save style": "Stil speichern", + "Apply selected styles to current prompt": "Momentan ausgewählte Stile auf die Prompt anwenden", + "Stop processing current image and continue processing.": "Verarbeitung des momentanen Bildes abbrechen und Verarbeitung fortsetzen.", + "Stop processing images and return any results accumulated so far.": "Verarbeitung abbrechen und alle bisherigen Ergebnisse ausgeben.", + "Style to apply; styles have components for both positive and negative prompts and apply to both": "Stil, der angwendet werden soll. Stile haben sowohl positive als auch negative Promptanteile und werden auf beide angewandt.", "Do not do anything special": "Nichts besonderes machen", - "Which algorithm to use to produce the image": "Der zu benutzende algorithmus für die Bildgeneration", - "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - sehr kreativ, jeder kann unterschiedliche Bilder, abhängig von der Step/Schritt anzahl bekommen. Werte höher als 30-40 helfen nicht.", - "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - am besten für img2img inpainting", + "Which algorithm to use to produce the image": "Der zu benutzende Algorithmus für die Bildgeneration", + "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - sehr kreativ, kann sehr unterschiedliche Bilder in Abhängigkeit von der Schrittanzahl bekommen. Werte höher als 30-40 helfen nicht.", + "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Modelle - am besten für inpainting", "Produce an image that can be tiled.": "Bild erzeugen, dass gekachelt werden kann.", - "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Verwendung eines zweistufigen Prozess, um ein Bild teilweise mit geringerer Auflösung zu erstellen, hochzuskalieren und dann die Details zu verbessern, ohne die zusammensetzung zu verändern.", - "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Bestimmt, wie wenig Bezug der Algorithmus zu dem Inhalt des Bildes haben soll. Bei 0 ändert sich nichts, und bei 1 erhalten Sie ein Bild ohne Bezug. 
Bei Werten unter 1,0 erfolgt die Verarbeitung in weniger Schritten, als der Schieberegler Sampling-Schritte angibt.", - "How many batches of images to create": "Wie viele Stapel von Bildern erstellt werden sollen", - "How many image to create in a single batch": "Wie viele Bilder in einem einzigen Stapel erstellt werden sollen", - "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - wie stark das Bild der Aufforderung entsprechen soll - niedrigere Werte führen zu kreativeren Ergebnissen", - "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Ein Wert, der die Ausgabe des Zufallszahlengenerators bestimmt: Wenn Sie ein Bild mit denselben Parametern und demselben Seed wie ein anderes Bild erstellen, erhalten Sie dasselbe Ergebnis.", + "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Verwendung eines zweistufigen Prozesses, um ein Bild mit geringerer Auflösung zu erstellen, hochzuskalieren und dann die Details zu verbessern, ohne die Komposition zu verändern.", + "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Bestimmt, wie wenig Bezug der Algorithmus zu dem Inhalt des Bildes haben soll. Bei 0 ändert sich nichts, und bei 1 besitzt das Bild keinen Bezug. Bei Werten unter 1,0 erfolgt die Verarbeitung in weniger Schritten, als der Schieberegler angibt.", + "How many batches of images to create": "Wie viele Sätze von Bildern erstellt werden sollen", + "How many image to create in a single batch": "Wie viele Bilder in einem Batch erstellt werden sollen", + "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - wie stark das Bild der Prompt entsprechen soll - niedrigere Werte führen zu kreativeren Ergebnissen", + "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Ein Wert, der die Ausgabe des Zufallszahlengenerators bestimmt: Wenn ein Bild mit denselben Parametern und demselben Seed wie ein anderes Bild erstellt wird, erhält man dasselbe Ergebnis.", "Set seed to -1, which will cause a new random number to be used every time": "Seed auf -1 setzen, so dass jedes Mal eine neue Zufallszahl verwendet wird", - "Reuse seed from last generation, mostly useful if it was randomed": "Wiederverwendung des Seed der letzten Generation, meist nützlich, wenn es zufällig gewählt wurde", - "Seed of a different picture to be mixed into the generation.": "Seed eines anderen Bildes, das bei der Erzeugung reingemischt wird.", - "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Wie stark die Veränderung sein soll. Bei 0 gibt es keinen Effekt. 
Bei 1 erhalten Sie das vollständige Bild mit variations Seed (außer bei ancestral samplers,wie Euler A, wo Sie nur etwas erhalten).", - "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Versuchen ein Bild zu erzeugen, das dem ähnelt, das mit demselben Seed bei der angegebenen Auflösung erzeugt worden wäre.", - "Separate values for X axis using commas.": "Trennen Sie die Werte für die X-Achse durch Kommas.", - "Separate values for Y axis using commas.": "Trennen Sie die Werte für die Y-Achse durch Kommas.", - "Write image to a directory (default - log/images) and generation parameters into csv file.": "Bild in ein Verzeichnis schreiben (Standard - log/images) und Generierungsparameter in eine csv-Datei.", - "Open images output directory": "Bildverzeichnis öffnen", - "How much to blur the mask before processing, in pixels.": "Wie stark die Maske vor der Verarbeitung verwischt werden soll, in Pixeln.", - "What to put inside the masked area before processing it with Stable Diffusion.": "Was soll in den maskierten Bereich vor der Verarbeitung mit Stable Diffusion.", + "Reuse seed from last generation, mostly useful if it was randomed": "Wiederverwendung des Seeds der letzten Generation, meist nützlich, wenn er zufällig gewählt wurde", + "Seed of a different picture to be mixed into the generation.": "Seed eines anderen Bildes, der bei der Erzeugung reingemischt wird.", + "How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Wie stark die Veränderung sein soll. Bei 0 gibt es keinen Effekt. Bei 1 erhält man das vollständige Bild mit dem Variationsseed (außer bei Ancestral Samplern, wie Euler A, wo man nur etwas erhält).", + "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Versuche ein Bild zu erzeugen, das dem ähnelt, das mit dem Seed bei der angegebenen Auflösung erzeugt worden wäre.", + "Separate values for X axis using commas.": "Trenne die Werte für die X-Achse durch Kommas.", + "Separate values for Y axis using commas.": "Trenne die Werte für die Y-Achse durch Kommas.", + "Write image to a directory (default - log/images) and generation parameters into csv file.": "Bild in ein Verzeichnis (Standard - log/images) und Generationsparameter in eine csv-Datei schreiben.", + "Open images output directory": "Ausgabeverzeichnis öffnen", + "How much to blur the mask before processing, in pixels.": "Wie stark die Maske vor der Verarbeitung weichgezeichnet werden soll, in Pixeln.", + "What to put inside the masked area before processing it with Stable Diffusion.": "Was in den maskierten Bereich vor der Verarbeitung mit Stable Diffusion soll.", "fill it with colors of the image": "Füllen mit den Farben des Bildes", "keep whatever was there originally": "Originalen Inhalt behalten", "fill it with latent space noise": "Füllen mit latent space noise", - "fill it with latent space zeroes": "Füllen mit latent space zeroes", + "fill it with latent space zeroes": "Füllen mit latent space Nullen", "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Hochskalieren des maskierten Bereichs auf die Zielauflösung, Inpainting, Zurückskalieren und Einfügen in das Originalbild.", - "Resize image to target resolution. 
Unless height and width match, you will get incorrect aspect ratio.": "Die Größe des Bildes auf die gewünschte Auflösung ändern. Wenn Höhe und Breite nicht übereinstimmen, erhalten Sie ein falsches Seitenverhältnis.", - "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Die Größe des Bildes so ändern, dass die gesamte Zielauflösung mit dem Bild ausgefüllt wird. Herausragende Teile zuschneiden.", - "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Die Größe des Bildes so ändern, dass die gesamte Zielauflösung mit dem Bild ausgefüllt wird. Herausragende Teile mit Farben des Bildes ausfüllen.", - "How many times to repeat processing an image and using it as input for the next iteration": "Wie oft soll die Verarbeitung eines Bildes wiederholt werden, um es als Eingabe für die nächste Iteration zu verwenden", - "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "Im Loopback-Modus wird die Stärke der Rauschunterdrückung in jeder Schleife mit diesem Wert multipliziert. <1 bedeutet abnehmende Vielfalt, so dass Ihre Sequenz zu einem festen Bild konvergiert. >1 bedeutet zunehmende Vielfalt, so dass die Sequenz immer chaotischer wird..", - "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Wie viel Pixel sollten sich beim SD-Upscale zwischen den Kacheln überlappen? Die Kacheln überlappen sich so, dass beim Zusammenfügen zu einem Bild keine deutlich sichtbare Naht entsteht..", + "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Die Größe des Bildes auf die gewünschte Auflösung ändern. Wenn Höhe und Breite nicht übereinstimmen, erhält man ein falsches Seitenverhältnis.", + "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Die Größe des Bildes so ändern, dass die gesamte Zielauflösung mit dem Bild ausgefüllt wird. Herausragende Teile werden abgeschnitten.", + "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Die Größe des Bildes so ändern, dass das gesamte Bild enthalten ist. Lücken werden mit Farben des Bildes ausgefüllt.", + "How many times to repeat processing an image and using it as input for the next iteration": "Wie oft die Verarbeitung eines Bildes wiederholt und als Eingabe für die nächste Iteration verwendet werden soll", + "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "Im Loopback-Modus wird die Denoisingstärke in jeder Schleife mit diesem Wert multipliziert. <1 bedeutet abnehmende Vielfalt, so dass die Sequenz zu einem festen Bild konvergiert. >1 bedeutet zunehmende Vielfalt, so dass die Sequenz immer chaotischer wird.", + "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Wie viel Pixel sich beim SD-Upscale zwischen den Kacheln überlappen. 
Die Kacheln überlappen sich so, dass beim Zusammenfügen zu einem Bild keine deutlich sichtbare Naht entsteht.",
    "A directory on the same machine where the server is running.": "Ein Verzeichnis auf demselben Rechner, auf dem der Server läuft.",
    "Leave blank to save images to the default path.": "Leer lassen, um Bilder im Standardpfad zu speichern.",
    "Result = A * (1 - M) + B * M": "Ergebnis = A * (1 - M) + B * M",
    "Result = A + (B - C) * M": "Ergebnis = A + (B - C) * M",
    "1st and last digit must be 1. ex:'1, 2, 1'": "Erste und letzte Ziffer müssen 1 sein. Bsp.:'1, 2, 1'",
    "Path to directory with input images": "Pfad zum Verzeichnis mit den Eingabebildern",
    "Path to directory where to write outputs": "Pfad zum Verzeichnis, in das die Ausgaben geschrieben werden sollen",
    "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime