Merge branch 'AUTOMATIC1111:master' into master

Authored by Sihan Wang on 2022-11-02 14:09:33 +08:00, committed by GitHub
commit 5c864be010
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
54 changed files with 4986 additions and 1096 deletions

.gitignore (3 changes)

@@ -29,4 +29,5 @@ notification.mp3
/textual_inversion
.vscode
/extensions
/test/stdout.txt
/test/stderr.txt

CODEOWNERS

@@ -1 +1,13 @@
* @AUTOMATIC1111
/localizations/ar_AR.json @xmodar @blackneoo
/localizations/de_DE.json @LunixWasTaken
/localizations/es_ES.json @innovaciones
/localizations/fr_FR.json @tumbly
/localizations/it_IT.json @EugenioBuffo
/localizations/ja_JP.json @yuuki76
/localizations/ko_KR.json @36DB
/localizations/pt_BR.json @M-art-ucci
/localizations/ru_RU.json @kabachuha
/localizations/tr_TR.json @camenduru
/localizations/zh_CN.json @dtlnor @bgluminous
/localizations/zh_TW.json @benlisquare

javascript/extensions.js (new file, 35 additions)

@@ -0,0 +1,35 @@
function extensions_apply(_, _){
disable = []
update = []
gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
if(x.name.startsWith("enable_") && ! x.checked)
disable.push(x.name.substr(7))
if(x.name.startsWith("update_") && x.checked)
update.push(x.name.substr(7))
})
restart_reload()
return [JSON.stringify(disable), JSON.stringify(update)]
}
function extensions_check(){
gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
x.innerHTML = "Loading..."
})
return []
}
function install_extension_from_index(button, url){
button.disabled = "disabled"
button.value = "Installing..."
textarea = gradioApp().querySelector('#extension_to_install textarea')
textarea.value = url
textarea.dispatchEvent(new Event("input", { bubbles: true }))
gradioApp().querySelector('#install_extension_button').click()
}
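The extensions_apply() handler above only gathers UI state: it collects the names of extensions to disable and to update into two JSON-encoded lists, returns them as the click handler's outputs, and triggers a reload. On the server side, a Gradio callback receives those two strings and only needs to decode them. A minimal sketch of such a callback, with hypothetical names and behaviour (the actual handler lives in the webui's Python UI code):

```python
import json

# Hypothetical server-side callback wired to the Apply button.
# The JS extensions_apply() above supplies the two JSON strings.
def apply_and_restart(disable_list, update_list):
    disabled = json.loads(disable_list)  # extensions whose enable_ box was unchecked
    updated = json.loads(update_list)    # extensions whose update_ box was checked
    print(f"disabling: {disabled}, updating: {updated}")
    # ...pull updates for `updated`, record `disabled`, then restart the server...
```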

javascript/hints.js

@@ -75,6 +75,7 @@ titles = {
"Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.",
"Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.",
"Inpainting conditioning mask strength": "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.",
"vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).",

javascript/imageviewer.js

@@ -13,6 +13,15 @@ function showModal(event) {
}
lb.style.display = "block";
lb.focus()
const tabTxt2Img = gradioApp().getElementById("tab_txt2img")
const tabImg2Img = gradioApp().getElementById("tab_img2img")
// show the save button in modal only on txt2img or img2img tabs
if (tabTxt2Img.style.display != "none" || tabImg2Img.style.display != "none") {
gradioApp().getElementById("modal_save").style.display = "inline"
} else {
gradioApp().getElementById("modal_save").style.display = "none"
}
event.stopPropagation()
}
@@ -81,6 +90,25 @@ function modalImageSwitch(offset) {
}
}
function saveImage(){
const tabTxt2Img = gradioApp().getElementById("tab_txt2img")
const tabImg2Img = gradioApp().getElementById("tab_img2img")
const saveTxt2Img = "save_txt2img"
const saveImg2Img = "save_img2img"
if (tabTxt2Img.style.display != "none") {
gradioApp().getElementById(saveTxt2Img).click()
} else if (tabImg2Img.style.display != "none") {
gradioApp().getElementById(saveImg2Img).click()
} else {
console.error("missing implementation for saving modal of this type")
}
}
function modalSaveImage(event) {
saveImage()
event.stopPropagation()
}
function modalNextImage(event) {
modalImageSwitch(1)
event.stopPropagation()
@@ -93,6 +121,9 @@ function modalPrevImage(event) {
function modalKeyHandler(event) {
switch (event.key) {
case "s":
saveImage()
break;
case "ArrowLeft":
modalPrevImage(event)
break;
@@ -198,6 +229,14 @@ document.addEventListener("DOMContentLoaded", function() {
modalTileImage.title = "Preview tiling";
modalControls.appendChild(modalTileImage)
const modalSave = document.createElement("span")
modalSave.className = "modalSave cursor"
modalSave.id = "modal_save"
modalSave.innerHTML = "🖫"
modalSave.addEventListener("click", modalSaveImage, true)
modalSave.title = "Save Image(s)"
modalControls.appendChild(modalSave)
const modalClose = document.createElement('span')
modalClose.className = 'modalClose cursor';
modalClose.innerHTML = '×'

javascript/ui.js

@@ -45,14 +45,14 @@ function switch_to_txt2img(){
return args_to_array(arguments);
}
function switch_to_img2img_img2img(){
function switch_to_img2img(){
gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click();
gradioApp().getElementById('mode_img2img').querySelectorAll('button')[0].click();
return args_to_array(arguments);
}
function switch_to_img2img_inpaint(){
function switch_to_inpaint(){
gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click();
gradioApp().getElementById('mode_img2img').querySelectorAll('button')[1].click();
@@ -65,26 +65,6 @@ function switch_to_extras(){
return args_to_array(arguments);
}
function extract_image_from_gallery_txt2img(gallery){
switch_to_txt2img()
return extract_image_from_gallery(gallery);
}
function extract_image_from_gallery_img2img(gallery){
switch_to_img2img_img2img()
return extract_image_from_gallery(gallery);
}
function extract_image_from_gallery_inpaint(gallery){
switch_to_img2img_inpaint()
return extract_image_from_gallery(gallery);
}
function extract_image_from_gallery_extras(gallery){
switch_to_extras()
return extract_image_from_gallery(gallery);
}
function get_tab_index(tabId){
var res = 0

launch.py

@@ -7,6 +7,7 @@ import shlex
import platform
dir_repos = "repositories"
dir_extensions = "extensions"
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
@@ -16,11 +17,11 @@ def extract_arg(args, name):
return [x for x in args if x != name], name in args
def run(command, desc=None, errdesc=None):
def run(command, desc=None, errdesc=None, custom_env=None):
if desc is not None:
print(desc)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
if result.returncode != 0:
@@ -101,7 +102,25 @@ def version_check(commit):
else:
print("Not a git clone, can't perform version check.")
except Exception as e:
print("versipm check failed",e)
print("version check failed", e)
def run_extensions_installers():
if not os.path.isdir(dir_extensions):
return
for dirname_extension in os.listdir(dir_extensions):
path_installer = os.path.join(dir_extensions, dirname_extension, "install.py")
if not os.path.isfile(path_installer):
continue
try:
env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {dirname_extension}", custom_env=env))
except Exception as e:
print(e, file=sys.stderr)
def prepare_enviroment():
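The new run_extensions_installers() pass runs each extension's install.py through run() with PYTHONPATH pointed at the webui root, so an install script can import helpers from launch.py itself. A minimal sketch of such an install.py, assuming the is_installed()/run_pip() helpers that launch.py already provides (the package name is only an example):

```python
# extensions/<your-extension>/install.py (illustrative sketch only)
import launch  # importable because run_extensions_installers() sets PYTHONPATH to the webui root

# Install a dependency only when it is missing; run_pip prints the given
# description while installing and respects the INDEX_URL environment variable.
if not launch.is_installed("send2trash"):
    launch.run_pip("install send2trash", "send2trash for the example extension")
```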
@@ -128,10 +147,12 @@ def prepare_enviroment():
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
sys.argv += shlex.split(commandline_args)
test_argv = [x for x in sys.argv if x != '--tests']
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
sys.argv, run_tests = extract_arg(sys.argv, '--tests')
xformers = '--xformers' in sys.argv
deepdanbooru = '--deepdanbooru' in sys.argv
ngrok = '--ngrok' in sys.argv
@@ -187,6 +208,8 @@ def prepare_enviroment():
run_pip(f"install -r {requirements_file}", "requirements for Web UI")
run_extensions_installers()
if update_check:
version_check(commit)
@@ -194,6 +217,26 @@ def prepare_enviroment():
print("Exiting because of --exit argument")
exit(0)
if run_tests:
tests(test_argv)
exit(0)
def tests(argv):
if "--api" not in argv:
argv.append("--api")
print(f"Launching Web UI in another process for testing with arguments: {' '.join(argv[1:])}")
with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr:
proc = subprocess.Popen([sys.executable, *argv], stdout=stdout, stderr=stderr)
import test.server_poll
test.server_poll.run_tests()
print(f"Stopping Web UI process with id {proc.pid}")
proc.kill()
def start_webui():
print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")

localizations/ar_AR.json

@@ -1,45 +1,33 @@
{
"rtl": true,
"⤡": "⤡",
"⊞": "⊞",
"×": "×",
"": "",
"": "",
"Loading...": "جار التحميل...",
"view": "معاينة",
"api": "api",
"•": "•",
"built with gradio": "مبني باستخدام Gradio",
"Stable Diffusion checkpoint": "نماذج الانتشار المستقر",
"txt2img": "نص لصورة",
"img2img": "صورة لصورة",
"Extras": "الإضافات",
"PNG Info": "معلومات PNG",
"Checkpoint Merger": "دمج النماذج",
"Train": "التدريب",
"Create aesthetic embedding": "Create aesthetic embedding",
"Image Browser": "مستعرض الصور",
"Settings": "الإعدادات",
"Prompt": "الموجه",
"Negative prompt": "الموجه السلبي",
"Run": "تشغيل",
"Skip": "تخطي",
"Interrupt": "إيقاف",
"Generate": "إنشاء",
"Style 1": "نمط 1",
"Style 2": "نمط 2",
"Label": "الوسم",
"File": "ملف",
"Drop File Here": "اسحب الملف هنا",
"-": "-",
"or": "أو",
"Click to Upload": "انقر للتحميل",
"Image": "صورة",
"Check progress": "تحقق من التقدم",
"Check progress (first)": "تحقق من التقدم (الأول)",
"Sampling Steps": "خطوات أخذ العينة",
"Sampling method": "نظام أخذ العينات",
"Loading...": "لحظة...",
"view": "اعرض ",
"api": "واجهة البرمجة",
"built with gradio": "مبني باستخدام gradio",
"Stable Diffusion checkpoint": "أوزان نموذج الإنتشار المسقر",
"txt2img": "نص إلى صورة",
"Prompt": "الطلب",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "الطلب (لبدء الإنتاج Ctrl+Enter أو Alt+Enter اضغط)",
"Negative prompt": "عكس الطلب",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "عكس الطلب (لبدء الإنتاج Ctrl+Enter أو Alt+Enter اضغط)",
"Add a random artist to the prompt.": "أضف فنان عشوائي للطلب",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "اقرأ عوامل الإنتاج من الطلب أو من الإنتاج السابق إذا كان الطلب فارغا",
"Save style": "احتفظ بالطلب وعكسه كإضافة",
"Apply selected styles to current prompt": "ألحق الإضافات المحددة إلى الطلب وعكسه",
"Generate": "أنتج",
"Skip": "تخطى",
"Stop processing current image and continue processing.": "لا تكمل خطوات هذة الحزمة وانتقل إلى الحزمة التالية",
"Interrupt": "توقف",
"Stop processing images and return any results accumulated so far.": "توقف عن الإنتاج واعرض ما تم إلى الآن",
"Style 1": "الإضافة 1",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "الإضافات (styles) عبارة عن كلمات تتكرر كثيرا يتم إلحاقها بالطلب وعكسه عند الرغبة",
"Style 2": "الإضافة 2",
"Do not do anything special": "لا يغير شيئا",
"Sampling Steps": "عدد الخطوات",
"Sampling method": "أسلوب الخطو",
"Which algorithm to use to produce the image": "Sampler: اسم نظام تحديد طريقة تغيير المسافات بين الخطوات",
"Euler a": "Euler a",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral: طريقة مبدعة يمكن أن تنتج صور مختلفة على حسب عدد الخطوات، لا تتغير بعد 30-40 خطوة",
"Euler": "Euler",
"LMS": "LMS",
"Heun": "Heun",
@@ -51,406 +39,480 @@
"DPM2 Karras": "DPM2 Karras",
"DPM2 a Karras": "DPM2 a Karras",
"DDIM": "DDIM",
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models: الأفضل في الإنتاج الجزئي",
"PLMS": "PLMS",
"Width": "العرض",
"Height": "الارتفاع",
"Restore faces": "ترميم الوجوه",
"Tiling": "تبليط",
"Highres. fix": "إصلاح الصور عالية الدقة",
"Firstpass width": "عرض المرور الأول",
"Firstpass height": "ارتفاع المرور الأول",
"Denoising strength": "قوة تقليل الضوضاء",
"Batch count": "عدد الدُفعات",
"Batch size": "حجم الدفعة",
"CFG Scale": "مقياس التقارب من الموجه (CFG)",
"Height": "الإرتفاع",
"Restore faces": "تحسين الوجوه",
"Tiling": "ترصيف",
"Produce an image that can be tiled.": "أنتج صور يمكن ترصيفها بجانب بعضها كالبلاط",
"Highres. fix": "إصلاح الدقة العالية",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "أنتج صورة بدقة منخفضة ثم قم برفع الدقة فيما بعد لمنع التشوهات التي تحصل عندما تكون الدقة المطلوبة كبيرة",
"Firstpass width": "العرض الأولي",
"Firstpass height": "الإرتفاع الأولي",
"Denoising strength": "المدى",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Denoising strength: حدد مدى الإبتعاد عن الصورة (عدد الخطوات الفعلي = عدد الخطوات * المدى)",
"Batch count": "عدد الحزم",
"How many batches of images to create": "يتم إنتاج الصور على دفعات، كل دفعة فيها حزمة من الصور",
"Batch size": "حجم الحزمة",
"How many image to create in a single batch": "Batch size: إنتاج حزمة صور أسرع من إنتاجهم فرادى، حدد عدد الصور في كل حزمة",
"CFG Scale": "التركيز",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "CFG scale: يحدد مقدار التركيز على تلبية الطلب وتجنب عكسه، كلما زاد قل الإبداع",
"Seed": "البذرة",
"Extra": "إضافي",
"Variation seed": "تباين البذرة",
"Variation strength": "قوة التباين",
"Resize seed from width": "تغيير حجم البذرة من العرض",
"Resize seed from height": "تغيير حجم البذرة من الارتفاع",
"Open for Clip Aesthetic!": "Open for Clip Aesthetic!",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Seed: رقم طبيعي عشوائي يسمح بإعادة إنتاج نفس الصورة إذا توافقت قيم العوامل الأخرى",
"Set seed to -1, which will cause a new random number to be used every time": "استخدم بذرة جديدة في كل مرة (نرمز لهذا الخيار بجعل قيمة البذرة 1-)",
"Reuse seed from last generation, mostly useful if it was randomed": "أعد استخدام البذرة من الإنتاج السابق",
"Extra": "مزج",
"Variation seed": "بذرة الممزوج",
"Seed of a different picture to be mixed into the generation.": "Variation seed: بذرة صورة أخرى ليتم مزجها مع الصورة الحالية",
"Variation strength": "أثر الممزوج",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Variation seed strength: مقدار أثر الصورة المدمجة على النتيجة النهائية (0: لا أثر، 1: أثر كامل ما عدا عند استخدام أسلوب خطو سلفي Ancestral)",
"Resize seed from width": "عرض الممزوج",
"Resize seed from height": "إرتفاع الممزوج",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Seed resize from: حدد دقة صورة الممزوج (0: نفس دقة الإنتاج)",
"Open for Clip Aesthetic!": "تضمين تجميلي",
"▼": "▼",
"Aesthetic weight": "Aesthetic weight",
"Aesthetic steps": "Aesthetic steps",
"Aesthetic learning rate": "Aesthetic learning rate",
"Slerp interpolation": "Slerp interpolation",
"Aesthetic imgs embedding": "Aesthetic imgs embedding",
"None": "لايوجد",
"Aesthetic text for imgs": "Aesthetic text for imgs",
"Slerp angle": "Slerp angle",
"Is negative text": "Is negative text",
"Script": "سكريبت",
"Prompt matrix": "مصفوفة الموجهات",
"Prompts from file or textbox": "موجهات من ملف أو مربع النص",
"X/Y plot": "الرسم البياني X/Y",
"Put variable parts at start of prompt": "ضع الأجزاء المتغيرة في بداية الموجه",
"Show Textbox": "إظهار مربع النص",
"File with inputs": "ملف يحتوي المدخلات",
"Prompts": "الموجهات",
"X type": "نوع X",
"Nothing": "لا شئ",
"Var. seed": "تغير البذرة",
"Var. strength": "قوة التغيير",
"Steps": "الخطوات",
"Prompt S/R": "موجه S / R",
"Prompt order": "ترتيب الموجهات",
"Sampler": "نظام أخذ العينات",
"Checkpoint name": "اسم النموذج",
"Aesthetic weight": "أثر التضمين",
"Aesthetic steps": "عدد الخطوات",
"Aesthetic learning rate": "معدل التعلم",
"Slerp interpolation": "امزج بطريقة كروية",
"Aesthetic imgs embedding": "التضمين",
"None": "بدون",
"Aesthetic text for imgs": "الطلب (اختياري)",
"This text is used to rotate the feature space of the imgs embs": "لإعادة توجيه التضمين التجميلي",
"Slerp angle": "أثر الطلب",
"Is negative text": "الطلب عكسي",
"Script": "أدوات خاصة",
"Prompt matrix": "مصفوفة طلبات",
"Put variable parts at start of prompt": "الجزء المتغير في بداية الطلب",
"Prompts from file or textbox": " قائمة طلبات",
"Iterate seed every line": "غير البذرة مع كل طلب",
"List of prompt inputs": "قائمة الطلبات",
"Upload prompt inputs": "اجلب الطلبات من ملف",
"Drop File Here": "اسقط ملف هنا",
"-": "-",
"or": "أو",
"Click to Upload": "انقر للرفع",
"X/Y plot": "مصفوفة عوامل",
"X type": "العامل الأول",
"Nothing": "لا شيء",
"Var. seed": "بذرة الممزوج",
"Var. strength": "أثر الممزوج",
"Steps": "عدد الخطوات",
"Prompt S/R": "كلمات بديلة",
"Prompt order": "ترتيب الكلمات",
"Sampler": "أسلوب الخطو",
"Checkpoint name": "ملف الأوزان",
"Hypernetwork": "الشبكة الفائقة",
"Hypernet str.": "قوة الشبكة الفائقة",
"Sigma Churn": "دفع سيجما",
"Sigma min": "أصغر سيجما",
"Sigma max": "أكبر سيجما",
"Sigma noise": "ضجة سيجما",
"Eta": "الوقت المتوقع",
"Clip skip": "تخطي Clip",
"Denoising": "تقليل الضوضاء",
"X values": "قيم X",
"Y type": "نوع Y",
"Y values": "قيم Y",
"Draw legend": "ارسم مفتاح التوضيح",
"Include Separate Images": "قم بتضمين الصور منفصلة",
"Keep -1 for seeds": "احتفظ بـقيمة -1 للبذور",
"Drop Image Here": "إسقاط الصورة هنا",
"Save": "حفظ",
"Send to img2img": "أرسل إلى صورة لصورة",
"Send to inpaint": "أرسل إلى إعادة الرسم الجزئي",
"Send to extras": "أرسل إلى الإضافات",
"Make Zip when Save?": "إنشاء ملف مضغوط عند الحفظ؟",
"Textbox": "مربع النص",
"Interrogate\nCLIP": "استجواب\n CLIP",
"Inpaint": "إعادة الرسم الجزئي",
"Batch img2img": "دفعات صورة لصورة",
"Image for img2img": "صورة (صورة لصورة)",
"Image for inpainting with mask": "صورة (إعادة الرسم الجزئي)",
"Mask": "القناع",
"Mask blur": "ضبابية القناع",
"Mask mode": "أسلوب القناع",
"Draw mask": "رسم القناع",
"Upload mask": "تحميل القناع",
"Masking mode": "أسلوب التقنيع",
"Inpaint masked": "إعادة الرسم الجزئي (المنطقة المقنعة)",
"Inpaint not masked": "إعادة الرسم الجزئي (المنطقة الغير مقنعة)",
"Masked content": "المحتوى المقنع",
"fill": "الملأ",
"original": "الأصلي",
"latent noise": "الضوضاء الكامنة",
"latent nothing": "لا شيء كامن",
"Inpaint at full resolution": "إعادة الرسم الجزئي بدقة كاملة",
"Inpaint at full resolution padding, pixels": "إعادة الرسم الجزئي بدقة كاملة, الحشو, بيكسل",
"Process images in a directory on the same machine where the server is running.": "معالجة الصور في المجلد على نفس الجهاز حيث يتم تشغيل الخادم.",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "استخدم مجلد إخراج فارغ لحفظ الصور بشكل طبيعي بدلاً من الكتابة إلى مجلد المخرجات.",
"Input directory": "مجلد المدخلات",
"Output directory": "مجلد المخرجات",
"Resize mode": "وضعية تغيير الحجم",
"Just resize": "تغييير الحجم فقط",
"Crop and resize": "اقتصاص وتغيير الحجم",
"Resize and fill": "تغيير الحجم والتعبئة",
"img2img alternative test": "صورة لصورة البديلة",
"Loopback": "الحلقة الراجعة",
"Outpainting mk2": "الرسم الخارجي نسخة 2",
"Poor man's outpainting": "الرسم الخارجي للفقراء",
"SD upscale": "ترقية الانتشار المستقر",
"should be 2 or lower.": "should be 2 or lower.",
"Override `Sampling method` to Euler?(this method is built for it)": "Override `Sampling method` to Euler?(this method is built for it)",
"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)",
"Original prompt": "Original prompt",
"Original negative prompt": "Original negative prompt",
"Override `Sampling Steps` to the same value as `Decode steps`?": "Override `Sampling Steps` to the same value as `Decode steps`?",
"Decode steps": "Decode steps",
"Override `Denoising strength` to 1?": "Override `Denoising strength` to 1?",
"Decode CFG scale": "Decode CFG scale",
"Randomness": "Randomness",
"Sigma adjustment for finding noise for image": "Sigma adjustment for finding noise for image",
"Loops": "Loops",
"Denoising strength change factor": "معامل قوة تقليل الضوضاء",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "الإعدادات الموصى بها: خطوات أخذ العينات: 80-100 ، طريقة أخذ العينات: Euler a ، تقليل الضوضاء: 0.8",
"Pixels to expand": "عدد البكسل للتوسيع",
"Outpainting direction": "إتجاه الرسم الخارجي",
"Inpainting conditioning mask strength": "قوة قناع الإنتاج الجزئي",
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "حدد مدى صرامة قناع الإنتاج، يصبح القناع شفاف إذا قوته 0 (لا يعمل إلا مع ملفات أوزان الإنتاج الجزئي: inpainting)",
"Sigma Churn": "العشوائية (Schurn)",
"Sigma min": "أدنى تشويش (Stmin)",
"Sigma max": "أقصى تشويش (Stmax)",
"Sigma noise": "التشويش (Snoise)",
"Eta": "العامل Eta η",
"Clip skip": "تخطي آخر طبقات CLIP",
"Denoising": "المدى",
"Cond. Image Mask Weight": "قوة قناع الإنتاج الجزئي",
"X values": "قيم العامل الأول",
"Separate values for X axis using commas.": "افصل القيم بفواصل (,) من اليسار إلى اليمين",
"Y type": "العامل الثاني",
"Y values": "قيم العامل الثاني",
"Separate values for Y axis using commas.": "افصل القيم بفواصل (,) من الأعلى إلى الأسفل",
"Draw legend": "أضف مفتاح التوضيح",
"Include Separate Images": "أضف الصور منفصلة",
"Keep -1 for seeds": "استخدم بذور عشوائية",
"Save": "احفظ",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "احفظ الصور مع ملف العوامل بصيغة CSV",
"Send to img2img": "أرسل لصورة إلى صورة",
"Send to inpaint": "أرسل للإنتاج الجزئي",
"Send to extras": "أرسل للمعالجة",
"Open images output directory": "افتح مجلد الصور المخرجة",
"Make Zip when Save?": "ضع النتائج في ملف مضغوط عند الحفظ",
"img2img": "صورة إلى صورة",
"Interrogate\nCLIP": "استجواب\nCLIP",
"Drop Image Here": "اسقط صورة هنا",
"Just resize": "تغيير الحجم فقط",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "غير حجم الصورة بدون مراعات اتزان الأبعاد",
"Crop and resize": "تغيير الحجم وقص الأطراف",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "غير حجم الصورة واقتص الأطراف الزائدة",
"Resize and fill": "تغيير الحجم وتبطين الأطراف",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "غير حجم الصورة واملأ الأطراف الزائدة بألوان من الصورة",
"img2img alternative test": "استجواب الصورة (تجريبي)",
"should be 2 or lower.": "يفترض أن يكون 2 أو أقل",
"Override `Sampling method` to Euler?(this method is built for it)": "استخدم أسلوب خطو Euler (مستحسن)",
"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "استبدل الطلب وعكسه في الأعلى بالطلب الأصلي وعكسه التاليين",
"Original prompt": "الطلب الأصلي",
"Original negative prompt": "عكس الطلب الأصلي",
"Override `Sampling Steps` to the same value as `Decode steps`?": "استبدل عدد الخطوات بعدد الخطوات الأصلية",
"Decode steps": "عدد الخطوات الأصلية",
"Override `Denoising strength` to 1?": "اجعل المدى 1",
"Decode CFG scale": "التركيز",
"Randomness": "العشوائية",
"Sigma adjustment for finding noise for image": "لا تسمح بتثبيت قيمة التباين",
"Loopback": "اجترار وتكرار",
"Loops": "عدد المرات",
"How many times to repeat processing an image and using it as input for the next iteration": "كم مرة يتم أخذ مخرجات الإنتاج كمدخلات وإعادة الإنتاج مرة أخرى",
"Denoising strength change factor": "معدل تغيير المدى",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "يتم ضرب المدى بهذا الرقم في كل مرة، إذا استخدمت رقم أصغر من 1 يمكن الرسو على نتيجة، وإذا استخدمت رقم أكبر من 1 تصبح النتيجة عشوائية",
"Outpainting mk2": "توسيع الصورة (mk2)",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "يفضل استخدام: 80-100 خطوة، أسلوب Euler a، المدى 0.8",
"Pixels to expand": "عدد البيكسلات",
"Mask blur": "تنعيم القناع",
"How much to blur the mask before processing, in pixels.": "مقدرا تنعيم القناع قبل استخدامه (يقاس بالبيكسل)",
"Outpainting direction": "اتجاه توسيع الصورة",
"left": "يسار",
"right": "يمين",
"up": "فوق",
"down": "تحت",
"Fall-off exponent (lower=higher detail)": "أس التناقص (الأدنى = تفاصيل أعلى)",
"Color variation": "اختلاف اللون",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "سيقوم بترقية الصورة إلى ضعف الأبعاد ؛ استخدم شريط تمرير العرض والارتفاع لضبط حجم التبليط",
"Tile overlap": "تداخل التبليط",
"Upscaler": "المرقي",
"Fall-off exponent (lower=higher detail)": "قوة السقوط (كلما قلت زادت التفاصيل)",
"Color variation": "تنوع الألوان",
"Poor man's outpainting": "توسيع الصورة (بدائي)",
"Masked content": "محتويات القناع",
"What to put inside the masked area before processing it with Stable Diffusion.": "ما يوضع مكان الفراغ في الصورة الذي نريد إنتاج محتوياته",
"fill": "ألوان",
"fill it with colors of the image": "املأ باستخدام ألوان مأخوذة من باقي الصورة",
"original": "بدون تغيير",
"keep whatever was there originally": "أبق محتويات ما تحت القناع كما هي",
"latent noise": "تشويش كامن",
"fill it with latent space noise": "املأه باستخدام تشويش من الفضاء الكامن",
"latent nothing": "تصفير كامن",
"fill it with latent space zeroes": "استبدل مكان القناع في الفضاء الكامن بأصفار",
"SD upscale": "مضاعفة الدقة بنموذج الإنتشار المستقر",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "سيتم تكبير حجم الصورة إلى الضعف، استخدم الطول والإرتفاع في الأعلى لتحديد حجم نافذة المكبر",
"Tile overlap": "تداخل النافذة",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "المكبر ينظر إلى أجزاء الصورة من خلال نافذة لتكبير المحتوى ثم ينتقل إلى الجزء المجاور، يفضل أن يكون هناك تداخل بين كل رقعة لكي لا يكون هناك اختلاف واضح بينهم",
"Upscaler": "طريقة التكبير",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
"ESRGAN_4x": "ESRGAN_4x",
"SwinIR 4x": "SwinIR 4x",
"Inpaint": "إنتاج جزئي",
"Draw mask": "ارسم القناع",
"Upload mask": "ارفع القناع",
"Inpaint masked": "أنتج ما بداخل القناع",
"Inpaint not masked": "أنتج ما حول القناع",
"Inpaint at full resolution": "إنتاج بالدقة الكاملة",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "كبر ما يراد إعادة إنتاجه ثم صغر النتيجة وألصقها في مكانها",
"Inpaint at full resolution padding, pixels": "عدد بيكسلات التبطين",
"Batch img2img": "صور إلى صور",
"Process images in a directory on the same machine where the server is running.": "حدد مسار مجلد صور موجود في جهاز الخادم",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "يمكنك أيضا تحديد مجلد حفظ النتائج (غير الإفتراضي)",
"Input directory": "مجلد المدخلات",
"Output directory": "مجلد المخرجات",
"Extras": "معالجة",
"Single Image": "صورة واحدة",
"Batch Process": "معالجة الدفعات",
"Batch from Directory": "دفعة من المجلد",
"Source": "مصدر",
"Show result images": "إظهار نتائج الصور ",
"Scale by": "رفع الحجم بمقدار",
"Scale to": "رفع الحجم إلى",
"Source": "المصدر",
"Scale by": "مضاعفة الدقة",
"Resize": "تغيير الحجم",
"Crop to fit": "اقتصاص للتوافق",
"Upscaler 2 visibility": "إظهار المرقي 2",
"GFPGAN visibility": "إظهار GFPGAN",
"CodeFormer visibility": "إظهار CodeFormer",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "وزن CodeFormer (0 = أقصى تأثير ، 1 = تأثير أدنى)",
"Open output directory": "افتح مجلد المخرجات",
"Send to txt2img": "أرسل إلى كتابة لصورة",
"A merger of the two checkpoints will be generated in your": "سيتم إنشاء نموذجمدمج من النموذجين في",
"checkpoint": "النموذج",
"directory.": "المجلد.",
"Primary model (A)": "النموذج الأساسي (أ)",
"Secondary model (B)": "النموذج الثانوي (ب)",
"Tertiary model (C)": "النموذج الثالث (ج)",
"Custom Name (Optional)": "اسم مخصص (اختياري)",
"Multiplier (M) - set to 0 to get model A": "المضاعف (M) - اضبط على 0 للحصول على النموذج أ",
"Interpolation Method": "طريقة الاستنباط",
"Weighted sum": "المجموع الموزون",
"Add difference": "أضافة الاختلاف",
"Save as float16": "حفظ float16",
"See": "شاهد",
"wiki": "ويكي",
"for detailed explanation.": "للحصول على شرح مفصل.",
"Create embedding": "إنشاء التضمين",
"Create hypernetwork": "إنشاء شبكة فائقة",
"Preprocess images": "تجهيز الصور",
"Upscaler 1": "المكبر الأول",
"Upscaler 2": "المكبر الثاني",
"Upscaler 2 visibility": "أثر المكبر الثاني",
"GFPGAN visibility": "أثر GFPGAN (محسن وجوه)",
"CodeFormer visibility": "أثر CodeFormer (محسن وجوه)",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "وزن CodeFormer (يزيد التفاصيل على حساب الجودة)",
"Upscale Before Restoring Faces": "كبر قبل تحسين الوجوه",
"Scale to": "دقة محددة",
"Crop to fit": "قص الأطراف الزائدة إذا لم تتناسب الأبعاد",
"Batch Process": "حزمة صور",
"Batch from Directory": "حزمة من مجلد",
"A directory on the same machine where the server is running.": "مسار مجلد صور موجود في جهاز الخادم",
"Leave blank to save images to the default path.": "اتركه فارغا لاستخدام المسار الإفتراضي",
"Show result images": "اعرض الصور الناتجة",
"PNG Info": "عوامل الصورة",
"Send to txt2img": "أرسل لنص إلى صورة",
"Checkpoint Merger": "مزج الأوزان",
"A merger of the two checkpoints will be generated in your": "سيتم مزج الأوزان التالية وحفظ الأوزان المدجمة مع ",
"checkpoint": "الأوزان",
"directory.": " مجلد.",
"Primary model (A)": "الأوزان الأولى (A)",
"Secondary model (B)": "الأوزان الثانية (B)",
"Tertiary model (C)": "الأوزان الثالثة (C)",
"Custom Name (Optional)": "الاسم الجديد (اختياري)",
"Multiplier (M) - set to 0 to get model A": "العامل M: مسافة الإبتعاد عن الأوزان الأولى A",
"Interpolation Method": "طريقة المزج",
"Weighted sum": "خطية",
"Result = A * (1 - M) + B * M": "النتيجة = A * (1 - M) + B * M",
"Add difference": "جمع الفرق",
"Result = A + (B - C) * M": "النتيجة = A + (B - C) * M",
"Save as float16": "احفظ بدقة float16",
"Run": "تشغيل",
"Train": "تدريب",
"See": "اقرأ",
"wiki": " الـwiki ",
"for detailed explanation.": "لمعرفة المزيد",
"Create embedding": "إنشاء تضمين",
"Name": "الاسم",
"Initialization text": "نص التهيئة",
"Number of vectors per token": "عدد المتجهات لكل رمز",
"Overwrite Old Embedding": "الكتابة فوق التضمين القديم",
"Modules": "الوحدات",
"Enter hypernetwork layer structure": "أدخل بنية طبقة الشبكة الفائقة",
"Select activation function of hypernetwork": "حدد وظيفة تنشيط الشبكة الفائقة",
"linear": "خطي (Linear)",
"relu": "الوحدة الخطية المعدلة (Relu)",
"leakyrelu": "الوحدة الخطية المعدلة المسربة (Leakyrelu)",
"elu": "الوحدة الأسية الخطية (Elu)",
"swish": "Swish",
"Add layer normalization": "أضف طبقة التسوية",
"Use dropout": "استخدم الهبوط",
"Overwrite Old Hypernetwork": "الكتابة فوق الشبكة الفائقة القديمة",
"Source directory": "مجلد المصدر",
"Destination directory": "مجلد النتائج",
"Existing Caption txt Action": "الإجراء النصي للتعليق المتوفر",
"Initialization text": "النص المبدأي",
"Number of vectors per token": "عدد المتجهات لكل وحدة لغوية",
"Overwrite Old Embedding": "استبدل التضمين القديم",
"Create hypernetwork": "إنشاء شبكة فائقة",
"Modules": "الأجزاء",
"Enter hypernetwork layer structure": "ترتيب مضاعفات عرض الطبقات",
"1st and last digit must be 1. ex:'1, 2, 1'": "المضاعفين الأول والأخير يجب أن يكونا 1، مثال: 1, 2, 1",
"Select activation function of hypernetwork": "دالة التنشيط",
"linear": "linear",
"relu": "relu",
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
"tanh": "tanh",
"sigmoid": "sigmoid",
"celu": "celu",
"gelu": "gelu",
"glu": "glu",
"hardshrink": "hardshrink",
"hardsigmoid": "hardsigmoid",
"hardtanh": "hardtanh",
"logsigmoid": "logsigmoid",
"logsoftmax": "logsoftmax",
"mish": "mish",
"prelu": "prelu",
"rrelu": "rrelu",
"relu6": "relu6",
"selu": "selu",
"silu": "silu",
"softmax": "softmax",
"softmax2d": "softmax2d",
"softmin": "softmin",
"softplus": "softplus",
"softshrink": "softshrink",
"softsign": "softsign",
"tanhshrink": "tanhshrink",
"threshold": "threshold",
"Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "تهيئة الأوزان (استخدم Kaiming مع relu وأمثالها وXavier مع sigmoid وأمثالها)",
"Normal": "Normal",
"KaimingUniform": "KaimingUniform",
"KaimingNormal": "KaimingNormal",
"XavierUniform": "XavierUniform",
"XavierNormal": "XavierNormal",
"Add layer normalization": "أضف تسوية الطبقات (LayerNorm)",
"Use dropout": "استخدم الإسقاط (Dropout)",
"Overwrite Old Hypernetwork": "استبدل الشبكة الفائقة القديمة",
"Preprocess images": "معالجة مسبقة للصور",
"Source directory": "مجلد المدخلات",
"Destination directory": "مجلد المخرجات",
"Existing Caption txt Action": "اذا كانت الصورة لديها توصيف (طلب)",
"ignore": "تجاهل",
"copy": "نسخ",
"prepend": "أضف قبل",
"copy": "انسخ",
"prepend": سبق",
"append": "ألحق",
"Create flipped copies": "قم بإنشاء نسخ مقلوبة",
"Split oversized images": "تقسيم الصور كبيرة الحجم",
"Use BLIP for caption": "استخدم BLIP للتعليق",
"Use deepbooru for caption": "استخدم deepbooru للتعليق",
"Split image threshold": "حد تقسيم الصورة",
"Split image overlap ratio": "نسبة تداخل الصورة المنقسمة",
"Preprocess": "تجهيز الصور",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "تدريب التضمين أو الشبكة الفائقة ؛ يجب تحديد مجلد بمجموعة من الصور بنسبة أبعاد 1: 1",
"[wiki]": "[ويكي]",
"Create flipped copies": "انشئ نسخ معكوسة للصور",
"Split oversized images": "قسّم الصور الكبيرة",
"Split image threshold": "حد تقسيم الصور الكبيرة",
"Split image overlap ratio": "نسبة تداخل اقسام الصور الكبيرة",
"Auto focal point crop": "اقتصاص تلقائي",
"Focal point face weight": "تمركز الوجوه",
"Focal point entropy weight": "تمركز التنوع",
"Focal point edges weight": "تمركز الحواف",
"Create debug image": "احفظ نتائج التحليل أيضا",
"Use BLIP for caption": "استخدم BLIP لتوصيف الصور",
"Use deepbooru for caption": "استخدم deepbooru لتوصيف الصور",
"Preprocess": "معالجة مسبقة",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "درب التضمين أو الشبكة الفائقة: يجب تحديد مجلد يحتوي صور مربعة فقط ",
"[wiki]": "[wiki]",
"Embedding": "التضمين",
"Embedding Learning rate": "معدل تعلم التضمين",
"Hypernetwork Learning rate": "معدل تعلم الشبكة الفائقة",
"Dataset directory": "مجلد مجموعة البيانات",
"Path to directory with input images": "مسار مجلد الصور المدخلة",
"Log directory": "مجلد السجل",
"Prompt template file": "ملف قالب الموجهات",
"Max steps": "الخطوات القصوى",
"Save an image to log directory every N steps, 0 to disable": "حفظ صورة في مجلد السجل كل N خطوات ، 0 للتعطيل",
"Save a copy of embedding to log directory every N steps, 0 to disable": "حفظ نسخة من التضمين في مجلد السجل كل N خطوات ، 0 للتعطيل",
"Save images with embedding in PNG chunks": "حفظ التضمين مع الصور في أجزاء PNG",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "قراءة المتغيرات (الموجه ، إلخ ...) من علامة تبويب نص لصورة عند إجراء المعاينات",
"Train Hypernetwork": "تدريب الشبكة الفائقة",
"Train Embedding": "تدريب التضمين",
"Create an aesthetic embedding out of any number of images": "Create an aesthetic embedding out of any number of images",
"Create images embedding": "Create images embedding",
"extras": "إضافات",
"favorites": "المفضلة",
"custom fold": "custom fold",
"Load": "تحميل",
"Path to directory where to write outputs": "مسار مجلد الصور المخرجة",
"Prompt template file": "ملف صيغ الطلبات",
"Max steps": "أقصى عدد لخطوات التدريب",
"Save an image to log directory every N steps, 0 to disable": "احفظ صورة في السجل بعد كل كم خطوة تدريب (إذا 0 لا تحفظ)",
"Save a copy of embedding to log directory every N steps, 0 to disable": "احفظ التضمين في السجل بعد كل كم خطوة تدريب (إذا 0 لا تحفظ)",
"Save images with embedding in PNG chunks": "احفظ التضمين بداخل ملف الصورة كعامل يمكن استخراجه من عوامل الصورة (صيغة PNG)",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "استخدم قيم العوامل الموجودة في تبويب نص إلى صورة لعرض نتائجهم أثناء التدريب",
"Train Hypernetwork": "درّب الشبكة الفائقة",
"Train Embedding": "درّب التضمين",
"Create aesthetic embedding": "تضمين تجميلي",
"Create an aesthetic embedding out of any number of images": "انشئ تضمين تجميلي يعبر عن مجموعة من الصور",
"Create images embedding": "انشئ التضمين التجميلي",
"Image Browser": "معرض الصور",
"Load": "حمّل",
"Images directory": "مجلد الصور",
"Prev batch": "الدفعة السابقة",
"Next batch": "الدفعة التالية",
"First Page": "الصفحة الأولى",
"Prev Page": "الصفحة السابقة",
"Page Index": "فهرس الصفحات",
"Page Index": "رقم الصفحة",
"Next Page": "الصفحة التالية",
"End Page": "صفحة النهاية",
"number of images to delete consecutively next": "عدد الصور المطلوب حذفها على التوالي بعد ذلك",
"Delete": "حذف",
"Generate Info": "معلومات الإنشاء",
"End Page": "الصفحة الأخيرة",
"number of images to delete consecutively next": "عدد الصور المتتالية للحذف",
"Delete": "احذف",
"Generate Info": "معلومات عامة",
"File Name": "اسم الملف",
"Collect": "جمع",
"Refresh page": "إعادة تحميل الصفحة",
"Date to": "التاريخ إلى",
"Number": "الرقم",
"set_index": "وضع الفهرس",
"Checkbox": "صندوق اختيار",
"Apply settings": "تطبيق الإعدادات",
"Saving images/grids": "حفظ الصور / الإطار الشبكي",
"Always save all generated images": "احفظ دائمًا جميع الصور التي تم إنشائها",
"File format for images": "تنسيق ملفات الصور",
"Images filename pattern": "نمط اسم ملفات الصور",
"Add number to filename when saving": "Add number to filename when saving",
"Always save all generated image grids": "احفظ دائمًا جميع الإطارات الشبكية للصور التي تم إنشاؤها",
"File format for grids": "تنسيق ملفات الإطارات الشبكية",
"Add extended info (seed, prompt) to filename when saving grid": "أضف معلومات إضافية (البذرة ، الموجه) إلى اسم الملف عند حفظ الإطار الشبكي",
"Do not save grids consisting of one picture": "لا تحفظ الإطارات الشبكية التي تتكون من صورة واحدة",
"Prevent empty spots in grid (when set to autodetect)": "منع المناطق الفارغة في الإطار الشبكي (عند الضبط على الاكتشاف التلقائي)",
"Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "عدد صفوف الإطار الشبكي استخدم -1 للاكتشاف التلقائي و 0 ليكون نفس حجم الدُفعة",
"Save text information about generation parameters as chunks to png files": "احفظ معلومات نصية حول متغيرات الإنشاء كمقاطع في ملفات png",
"Create a text file next to every image with generation parameters.": "قم بإنشاء ملف نصي بجوار كل صورة باستخدام متغيرات الإنشاء.",
"Save a copy of image before doing face restoration.": "احفظ نسخة من الصورة قبل القيام بترميم الوجوه.",
"Quality for saved jpeg images": "جودة الصور المحفوظة بتنسيق jpeg",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "إذا كانت صورة PNG أكبر من 4 ميجابايت أو كان أي بُعد أكبر من 4000 ، قم بتقليل حجم الصورة وحفظها بتنسيق JPG",
"Use original name for output filename during batch process in extras tab": "استخدم الاسم الأصلي لاسم ملف الإخراج أثناء عملية الدُفعات في علامة تبويب الإضافات",
"When using 'Save' button, only save a single selected image": "عند استخدام زر 'حفظ' ، احفظ فقط صورة واحدة محددة",
"Do not add watermark to images": "لا تقم بإضافة العلامة المائية للصور",
"Paths for saving": "مسارات الحفظ",
"Output directory for images; if empty, defaults to three directories below": "مجلد المخرجات للصور ؛ إذا كان فارغا ، يتم تعيينه افتراضيًا إلى المجلدات الثلاثة أدناه",
"Output directory for txt2img images": "مجلد المخرجات لصور نص لصورة",
"Output directory for img2img images": "مجلد المخرجات لصور صورة لصورة",
"Output directory for images from extras tab": "مجلد المخرجات لصور علامة تبويب الإضافات",
"Output directory for grids; if empty, defaults to two directories below": "مجلد المخرجات للإطارات الشبكية ؛ إذا كان فارغا ، يتم تعيينه افتراضيًا إلى المجلدين أدناه",
"Output directory for txt2img grids": "مجلد المخرجات للإطارات الشبكية نص لصورة",
"Output directory for img2img grids": "مجلد المخرجات للإطارات الشبكية صورة لصورة",
"Directory for saving images using the Save button": "مجلد لحفظ الصور باستخدام زر حفظ",
"Saving to a directory": "يتم الحفظ إلى المجلد..",
"Save images to a subdirectory": "حفظ الصور في مجلد فرعي",
"Save grids to a subdirectory": "حفظ الإطارات الشبكية في مجلد فرعي",
"Collect": "اجمع",
"extras": "معالجة",
"favorites": "المفضلة",
"custom fold": "مجلد آخر",
"Input images directory": "مجلد الصور المدخلة",
"Settings": "إعدادات",
"Apply settings": "طبق الإعدادت",
"Saving images/grids": "حفظ الصور وجداولها",
"Always save all generated images": "احفظ كل الصور المنتجة",
"File format for images": "صيغة ملفات الصور",
"Images filename pattern": "نمط تسمية الصور",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "استخدم الأوسمة التالية لتحديد كيفية تسمية الصور: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp] أو اتركه فارغا إذا أردت",
"Add number to filename when saving": "دائما أضف رقم على اسم الملف",
"Always save all generated image grids": "احفظ جداول الصور دائما",
"File format for grids": "صيغة ملفات جداول الصور",
"Add extended info (seed, prompt) to filename when saving grid": "أضف عوامل الإنتاج (مثل الطلب والبذرة) لأسماء جداول الصور",
"Do not save grids consisting of one picture": "لا تحفظ جدول الصور عند إنتاج صورة واحدة فقط",
"Prevent empty spots in grid (when set to autodetect)": "في الوضع التلقائي امنع الفراغات في جداول الصور",
"Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "عدد صفوف جداول الصور (1-: تلقائي، 0: نفس حجم الحزمة)",
"Save text information about generation parameters as chunks to png files": "احفظ عوامل الإنتاج داخل ملفات الصور (صيغة PNG)",
"Create a text file next to every image with generation parameters.": "انشئ ملف نصي يحتوي على عوامل الإنتاج بجانب كل صورة",
"Save a copy of image before doing face restoration.": "احفظ نسخة من الصورة قبل تحسين الوجوه",
"Quality for saved jpeg images": "جودة حفظ صور JPEG",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "إذا كان حجم ملف صورة PNG أكبر من 4MB أو أحد أبعاد الصورة أكبر من 4000 بيكسل، صغر الصورة واحفظها بصيغة JPEG",
"Use original name for output filename during batch process in extras tab": "استخدم الإسم الأصلي للصور عند معالجتهم في حزم تحت تبويب معالجة",
"When using 'Save' button, only save a single selected image": "احفظ صورة واحدة فقط عند الضغط على حفظ",
"Do not add watermark to images": "لا تضف علامة مائية على الصور",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "الصور المنتجة لن تحتوي على علامة مائية عند تفعيل هذا الخيار، تحذير: ربما قد يدل هذا على أنك تتصرف بشكل غير أخلاقي",
"Paths for saving": "اماكن الحفظ",
"Output directory for images; if empty, defaults to three directories below": "مسار حفظ الصور المخرجة؛ يمكن أن يترك فارغا",
"Output directory for txt2img images": "مسار الصور المخرجة من تبويب نص إلى صورة",
"Output directory for img2img images": "مسار الصور المخرجة من تبويب صورة إلى صورة",
"Output directory for images from extras tab": "مسار الصور المخرجة من تبويب معالجة",
"Output directory for grids; if empty, defaults to two directories below": "مسار حفظ جداول الصور المخرجة؛ يمكن أن يترك فارغا",
"Output directory for txt2img grids": "مسار جداول الصور المخرجة من تبويب نص إلى صورة",
"Output directory for img2img grids": "مسار جداول الصور المخرجة من تبويب صورة إلى صورة",
"Directory for saving images using the Save button": "مسار حفظ الصور عند الضغط على زر الحفظ",
"Saving to a directory": "مجلدات الحفظ",
"Save images to a subdirectory": "احفظ الصور في مجلد فرعي",
"Save grids to a subdirectory": "احفظ جداول الصور في مجلد فرعي",
"When using \"Save\" button, save images to a subdirectory": "احفظ الصور في مجلد فرعي عند الضغط على زر الحفظ",
"Directory name pattern": "نمط اسم المجلد",
"Max prompt words for [prompt_words] pattern": "أقصى عدد لكلمات التوجيه لنمط [كلمات_التوجيه]",
"Upscaling": "الارتقاء",
"Tile size for ESRGAN upscalers. 0 = no tiling.": "حجم التبليط لترقيات ESRGAN, القيمة 0= لا يوجد تبليط.",
"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "تداخل المربعات ، بالبكسل لمرقي ESRGAN. القيم المنخفضة = تداخل مرئي.",
"Tile size for all SwinIR.": "حجم التليط لجميع عمليات SwinIR.",
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "تداخل المربعات بالبكسل لـ SwinIR. القيم المنخفضة = تداخل مرئي.",
"LDSR processing steps. Lower = faster": "خطوات معالجة LDSR. أقل = أسرع",
"Upscaler for img2img": "المرقي لصورة لصورة",
"Upscale latent space image when doing hires. fix": "ترقية صورة الفضاء الكامن عند التعيينات. تصليح",
"Face restoration": "ترميم الوجوه",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "متغيرات وزن CodeFormer ؛ 0 = أقصى تأثير ؛ 1 = الحد الأدنى من التأثير",
"Move face restoration model from VRAM into RAM after processing": "انقل نموذج استعادة الوجوه من VRAM إلى RAM بعد المعالجة",
"System": "نظام",
"VRAM usage polls per second during generation. Set to 0 to disable.": "استطلاعات استخدام VRAM في الثانية أثناء الإنشاء. اضبط على 0 للتعطيل.",
"Always print all generation info to standard output": "قم دائمًا بطباعة جميع معلومات الإنشاء إلى المخرج القياسي",
"Add a second progress bar to the console that shows progress for an entire job.": "أضف شريط تقدم آخر إلى وحدة التحكم يُظهر التدقم الكلي للمهمة.",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "استخدم الأوسمة التالية لتحديد كيفية تسمية المجلدات الفرعية للصور والصور المركبة: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp] أو اتركه فارغا إذا أردت",
"Max prompt words for [prompt_words] pattern": "أقصى عدد كلمات الطلب عند استخدام وسم [prompt_words]",
"Upscaling": "تكبير الصور",
"Tile size for ESRGAN upscalers. 0 = no tiling.": "حجم نافذة المكبر ESRGAN (يمكن أن يكون 0)",
"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "تداخل نافذة المكبر ESRGAN (تتضح الفواصل إذا كان قليل)",
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "حدد ما تريد عرضه كخيار لمكبر من نوع Real-ESRGAN يمكن استخدامه (يتطلب إعادة تشغيل)",
"Tile size for all SwinIR.": "حجم نافذة المكبر SwinIR",
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "تداخل نافذة المكبر SwinIR (تتضح الفواصل إذا كان قليل)",
"LDSR processing steps. Lower = faster": "عدد خطوات مكبر LDSR (أسرع كلما قلت)",
"Upscaler for img2img": "طريقة التكبير تحت تبويب صورة إلى صورة",
"Upscale latent space image when doing hires. fix": "عند اختيار \"إصلاح الدقة العالية\" قم بالتكبير في الفضاء الكامن",
"Face restoration": "تحسين الوجوه",
"Face restoration model": "نموذج تحسين الوجوه",
"Restore low quality faces using GFPGAN neural network": "استخدم نموذج GFPGAN لتحسين الوجوه",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "وزن CodeFormer (يزيد التفاصيل على حساب الجودة)",
"Move face restoration model from VRAM into RAM after processing": "احذف محسن الوجوه من ذاكرة كرت الشاشة (VRAM) بعد استخدامه",
"System": "النظام",
"VRAM usage polls per second during generation. Set to 0 to disable.": "عدد مرات التحقق من ذاكرة كرت الشاشة المستخدمة (VRAM) في الثانية",
"Always print all generation info to standard output": "اطبع دائما كل عوامل الإنتاج",
"Add a second progress bar to the console that shows progress for an entire job.": "أضف شريط تقدم ثاني لعملية الإنتاج الكاملة",
"Training": "التدريب",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "تفريغ VAE و CLIP من VRAM عند التدريب",
"Filename word regex": "اسم ملف كلمات regex",
"Filename join string": "سلسلة أحرف دمج أسماء الملفات",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "عدد التكرارات لصورة إدخال واحدة لكل حقبة ؛ تستخدم فقط لعرض رقم الحقبة",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "حفظ ملف csv يحتوي على الخسارة إلى مجلد السجل كل N خطوات ، 0 للتعطيل",
"Stable Diffusion": "انتشار مستقر",
"Checkpoints to cache in RAM": "التخزين المؤقت للنماذج في RAM",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "قلل استخدام ذاكرة كرت الشاشة (VRAM) عند تدريب الشبكة الفائقة بالتخلص من CLIP وVAE",
"Filename word regex": "التعبير النمطي (RegEx) لكلمات اسم الملف",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "استخدم هذا التعبير لاستخراج كلمات من أسماء الملفات ليتم دمجها بالخيار التالي لتستخدم في التدريب، اتركه فارغا لتستخدم اسم الملف كما هو",
"Filename join string": "النص الفاصل للكلمات المدموجة",
"This string will be used to join split words into a single line if the option above is enabled.": "سيستخدم بين كل كلمة يتم استخراجها من الخيار السابق",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "عدد مرات تكرار الصور في كل دورة (Epoch)، يستخدم للعرض فقط",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "احفظ خسارة التدريب (Loss) في السجل بعد كل كم خطوة (إذا 0 لا تحفظ)",
"Stable Diffusion": "نموذج الإنتشار المستقر (Stable Diffusion)",
"Checkpoints to cache in RAM": "عدد النماذج التي تترك في الذاكرة العشوائية (RAM)",
"Hypernetwork strength": "قوة الشبكة الفائقة",
"Apply color correction to img2img results to match original colors.": "قم بتطبيق تصحيح الألوان على نتائج صورة لصورة لمطابقة الألوان الأصلية.",
"Save a copy of image before applying color correction to img2img results": "احفظ نسخة من الصورة قبل تطبيق تصحيح الألوان على نتائج صورة لصورة",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "باستخدام صورة لصورة ، قم بالضبط بعدد الخطوات التي يحددها شريط التمرير (عادةً ما سيكون أقل مع تقليل التشويش).",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "تمكين تكميم عينات K للحصول على نتائج أوضح وأنظف. هذا قد يغير البذرة المستخدمة. يتطلب إعادة التشغيل للتطبيق.",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "التأكيد: استخدم (النص) لجعل النموذج يولي مزيدًا من الاهتمام للنص و استخدم [النص] لجعل النموذج يولي اهتمامًا أقل للنص",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "استخدم طريقة التأكيد القديمة. يمكن أن يكون مفيدًا لإعادة إنتاج البذور القديمة.",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "اجعل انظمة أخذ عينات الانتشار K تنتج نفس الصور في الدفعات كما هو الحال عند إنشاء صورة واحدة",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "زيادة التناسق عن طريق المساحة المتروكة من الفاصلة الأخيرة ضمن عدد n من الرموز المميزة عند استخدام أكثر من 75 رمزًا مميزًا",
"Filter NSFW content": "تصفية المحتوى الإباحي",
"Stop At last layers of CLIP model": "توقف عند آخر طبقات نموذج CLIP",
"Apply color correction to img2img results to match original colors.": "صحح ألوان نتائج صورة إلى صورة لتشابه الصورة الأصلية",
"Save a copy of image before applying color correction to img2img results": "احفظ نسخة من الصور المنتجة من صورة إلى صورة قبل عملية تصحيح الألوان",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "استخدم بالضبط عدد الخطوات المحددة بالرغم عن المدى (عدد الخطوات الإفتراضي = عدد الخطوات / المدى)",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "فعل التجزيء الكمي (Quantization) لأغلب أساليب الخطو (k-diffusion) للحصول على صور أنظف (يتطلب إعادة تشغيل)",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "أحط أي كلمة أو عبارة في الطلب أو عكسه بأقواس () للتشديد أو [] للتيسير",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "استخدم الطريقة القديمة للتشديد (إذا كانت لديك نتائج قديمة تريد تجربتها)",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "عدم تأثير تحزيم الصور على البذور عند استخدام أساليب خطو (k-diffusion)",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "عند استخدام طلب طويل أكبر من 75 وحدة لغوية، افصل الطلب عند أخر فاصلة قبل كم كلمة",
"Filter NSFW content": "انتق المحتوى النظيف فقط",
"Stop At last layers of CLIP model": "قف عند آخر كم طبقة لنموذج CLIP",
"Allowed categories for random artists selection when using the Roll button": "اختر اهتمامات الفنانين المسموح بإضافتهم للطلب",
"Interrogate Options": "خيارات الاستجواب",
"Interrogate: keep models in VRAM": "الاستجواب: احتفظ بالنماذج في VRAM",
"Interrogate: use artists from artists.csv": "الاستجواب: استخدم قائمة الفنانين من ملف artists.csv",
"Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "الاستجواب: قم بتضمين رتب علامات النموذج في النتائج (ليس له تأثير على الاستجواب المستند إلى التعليقات).",
"Interrogate: num_beams for BLIP": "الاستجواب: عدد الحزم لـ BLIP",
"Interrogate: minimum description length (excluding artists, etc..)": "الاستجواب: الحد الأدنى لطول الوصف (باستثناء الفنانين ،إلخ..)",
"Interrogate: maximum description length": "الاستجواب: الحد الأقصى لطول الوصف",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: أقصى عدد من الأسطر في الملف النصي (0 = بلا حدود)",
"Interrogate: deepbooru score threshold": "الاستجواب: عتبة درجات deepbooru",
"Interrogate: deepbooru sort alphabetically": "الاستجواب: الترتيب الأبجدي لـ deepbooru",
"use spaces for tags in deepbooru": "استخدام مسافات للعلامات في deepbooru",
"Interrogate: keep models in VRAM": "ابق نموذج الاستجواب في ذاكرة كرت الشاشة (VRAM)",
"Interrogate: use artists from artists.csv": "استخدم قائمة الفنانين في الاستجواب من ملف artists.csv",
"Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "أدرج رتب الشعارات عند الاستجواب (لا تعمل مع جميع النماذج)",
"Interrogate: num_beams for BLIP": "عدد أشعة الاستجواب لنموذج BLIP",
"Interrogate: minimum description length (excluding artists, etc..)": "أقل عدد كلمات للتوصيف (لا يتضمن الفنانين وغيرهم)",
"Interrogate: maximum description length": "أكثر عدد كلمات للتوصيف",
"CLIP: maximum number of lines in text file (0 = No limit)": "أكثر عدد أسطر لملف نصي عند الاستجواب باستخدام CLIP (0 = بدون حد)",
"Interrogate: deepbooru score threshold": "حد درجة الاستجواب باستخدام deepbooru",
"Interrogate: deepbooru sort alphabetically": "رتب نتائج توصيف deepbooru أبجديا",
"use spaces for tags in deepbooru": "افصل شعارات deepbooru بمسافات",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "تجاهل الأقواس عند استخدام deepbooru (كي لا تعامل كأقواس التشديد)",
"User interface": "واجهة المستخدم",
"Show progressbar": "إظهار شريط التقدم",
"Show image creation progress every N sampling steps. Set 0 to disable.": "إظهار تقدم إنشاء الصورة لكل N خطوة من خطوات أخذ العينات. قم بتعيين 0 للتعطيل.",
"Show previews of all images generated in a batch as a grid": "إظهار معاينة لجميع الصور التي يتم إنشائها في الدفعة كإطار شبكي",
"Show grid in results for web": "إظهار الإطار الشبكي في نتائج الويب",
"Do not show any images in results for web": "لا تظهر أي صور في نتائج الويب",
"Add model hash to generation information": "أضف معلومات الهاش للنموذج إلى معلومات الإنشاء",
"Add model name to generation information": "أضف اسم النموذج إلى معلومات الإنشاء",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "عند قراءة متغيرات الإنشاء من النص إلى واجهة المستخدم (من معلومات PNG أو النص الملصق) ، لا تقم بتغيير النموذج",
"Font for image grids that have text": "الخط المستخدم للإطارات الشبكية للصور التي تحتوي على نص",
"Enable full page image viewer": "تفعيل عارض الصور ذو الصفحة الكاملة",
"Show images zoomed in by default in full page image viewer": "إظهار الصور التي تم تكبيرها بشكل افتراضي في عارض الصور ذو الصفحة الكاملة",
"Show generation progress in window title.": "إظهار تقدم الإنشاء في عنوان الشاشة.",
"Show progressbar": "اظهر شريط التقدم",
"Show image creation progress every N sampling steps. Set 0 to disable.": "اعرض صورة بعد كل كم خطوة (إذا 0 لا تعرض)",
"Show previews of all images generated in a batch as a grid": "اعرض كل الصور التي تم إنتاجها في حزمة كجدول",
"Show grid in results for web": "أظهر نتائج جداول الصور",
"Do not show any images in results for web": "لا تظهر نتائج الصور",
"Add model hash to generation information": "أضف رمز تهشير (Hash) ملف الأوزان لعوامل الإنتاج",
"Add model name to generation information": "أضف اسم ملف الأوزان لعوامل الإنتاج",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "لا تغير الأوزان المختارة عند قراءة عوامل الإنتاج من صورة أو من ملف",
"Send seed when sending prompt or image to other interface": "عند إرسال صورة أو طلب ألحق البذرة أيضا",
"Font for image grids that have text": "نوع الخط في جداول الصور التي تحتوي على نصوص",
"Enable full page image viewer": "اسمح بعرض الصور في وضع ملئ الشاشة",
"Show images zoomed in by default in full page image viewer": "اعرض الصور مقربة عند استخدام وضع ملئ الشاشة",
"Show generation progress in window title.": "أظهر التقدم في عنوان النافذة",
"Quicksettings list": "قائمة الإعدادات السريعة",
"Localization (requires restart)": "الترجمة (يتطلب إعادة التشغيل)",
"Sampler parameters": "متغيرات أنظمة اخذ العينات",
"Hide samplers in user interface (requires restart)": "إخفاء أنظمة أخذ العينات في واجهة المستخدم (يتطلب إعادة التشغيل)",
"eta (noise multiplier) for DDIM": "الوقت المتبقي (مضاعف الضوضاء) لـ DDIM",
"eta (noise multiplier) for ancestral samplers": "الوقت المتبقي (مضاعف الضوضاء) لأنظمة أخذ العينات التي تعتمد على الأجداد",
"img2img DDIM discretize": "تفصل DDIM لصورة لصورة",
"uniform": "التوحيد",
"quad": "رباعية",
"sigma churn": "دفع سيجما",
"sigma tmin": "سيجما tmin",
"sigma noise": "ضجيج سيجما",
"Eta noise seed delta": "الوقت المتوقع لديلتا بذرة الضجيج ",
"Images Browser": "مستعرض الصور",
"Preload images at startup": "تحميل الصور مسبقًا عند بدء التشغيل",
"Number of columns on the page": "Number of columns on the page",
"Number of rows on the page": "Number of rows on the page",
"Minimum number of pages per load": "الحد الأدنى لعدد الصفحات لكل تحميل",
"Request browser notifications": "طلب إشعارات المتصفح",
"Download localization template": "تنزيل نموذج الترجمة",
"Reload custom script bodies (No ui updates, No restart)": "إعادة تحميل السكريبتات المخصصة (لا توجد تحديثات لواجهة المستخدم ، لا إعادة تشغيل)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "أعادة تشغيل Gradio و تحديث مكونات واجهة الاستخدام (السكريبتات و ui.py و js و css فقط)",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "الموجه (اضغط على Ctrl + Enter أو Alt + Enter للإنشاء)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "الموجه السلبي (اضغط على Ctrl + Enter أو Alt + Enter للإنشاء)",
"Add a random artist to the prompt.": "أضف فنانًا عشوائيًا إلى الموجه.",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "قراءة متغيرات الإنشاء من الموجه أو الإنشاء الأخير إذا كان الموجه فارغا في واجهة المستخدم.",
"Save style": "حفظ النمط",
"Apply selected styles to current prompt": "تطبيق الأنماط المحددة للموجه الحالي",
"Stop processing current image and continue processing.": "توقف عن معالجة الصورة الحالية واستمر في المعالجة.",
"Stop processing images and return any results accumulated so far.": "توقف عن معالجة الصور وقم بإرجاع أي نتائج متراكمة حتى الآن.",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "النمط المطلوب تطبيقه ؛ الأنماط لها مكونات لكل من الموجهات الإيجابية والسلبية وتنطبق على كليهما",
"Do not do anything special": "لا تفعل أي شيء خاص",
"Which algorithm to use to produce the image": "الخوارزمية التي يجب استخدامها لإنتاج الصورة",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - مبدع جدًا ، يمكن لكل منها الحصول على صورة مختلفة تمامًا اعتمادًا على عدد الخطوات ، تعيين الخطوات على أعلى من 30-40 لا يعطي نتيجة مفيدة",
"Denoising Diffusion Implicit Models - best at inpainting": "نماذج تقليل التشويش الضمني - أفضل اسخدام في الرسم الجزئي",
"Produce an image that can be tiled.": "أنتج صورة يمكن تبليطها.",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "استخدم عملية من خطوتين لإنشاء صورة جزئيًا بدقة أقل ثم ترقيتها و تحسين تفاصيلها دون تغيير التكوين",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "يحدد مدى ضآلة احترام الخوارزمية لمحتوى الصورة. عند 0 ، لن يتغير شيء ، وعند 1 ستحصل على صورة لا صلة لها بالصورة الأصلية. مع وجود قيم أقل من 1.0 ، المعالجة ستأخذ خطوات أقل مما يحدده شريط تمرير خطوات المعالجة.",
"How many batches of images to create": "كم عدد دفعات الصور التي يتم إنشائها",
"How many image to create in a single batch": "كم صورة يتم إنشائها دفعة واحدة",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "مقياس التوجيه الحر للمصنف - إلى أي مدى يجب أن تتوافق الصورة مع الموجه - القيم المنخفضة تنتج نتائج أكثر إبداعًا",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "القيمة التي تحدد ناتج منشئ الأرقام العشوائية - إذا قمت بإنشاء صورة بنفس المتغيرات و بذرة الصورة ، فستحصل على نفس النتيجة",
"Set seed to -1, which will cause a new random number to be used every time": "عيّن البذرة إلى -1 ، مما سيؤدي إلى استخدام رقم عشوائي جديد في كل مرة",
"Reuse seed from last generation, mostly useful if it was randomed": "إعادة استخدام البذرة من الإنشاء الأخير ، وتكون مفيدة في الغالب إذا كانت عشوائية",
"Seed of a different picture to be mixed into the generation.": "استخدام بذرة من صورة مختلفة ليتم مزجها في الإنشاء.",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "ما مدى قوة التباين عند الإنشاء. 0 ، لن يكون هناك أي تأثير. 1 ، ستحصل على الصورة الكاملة ببذور التباين (باستثناء أنظمة عينات الأسلاف ، حيث ستحصل على شيء ما فقط).",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "قم بمحاولة لإنتاج صورة مشابهة لما تم إنتاجه بنفس البذرة بناءا على دقة محددة",
"This text is used to rotate the feature space of the imgs embs": "This text is used to rotate the feature space of the imgs embs",
"Separate values for X axis using commas.": "افصل بين قيم المحور X باستخدام الفواصل.",
"Separate values for Y axis using commas.": "افصل بين قيم المحور Y باستخدام الفواصل.",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "اكتب الصورة إلى مجلد (الافتراضي - log/images) وتوليد المتغيرات في ملف csv.",
"Open images output directory": "افتح مجلد مخرجات الصور",
"How much to blur the mask before processing, in pixels.": "كم يتم تضبيب القناع قبل المعالجة ، بالبكسل.",
"What to put inside the masked area before processing it with Stable Diffusion.": "ما يجب وضعه داخل المنطقة المقنعة قبل معالجتها باستخدام الانتشار المستقر.",
"fill it with colors of the image": "املأها بألوان الصورة",
"keep whatever was there originally": "احتفظ بكل ما كان هناك في الأصل",
"fill it with latent space noise": "املأه بضوضاء الفضاء الكامنة",
"fill it with latent space zeroes": "املأه بأصفار الفضاء الكامنة",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "قم بترقية المنطقة المقنعة إلى الدقة المستهدفة و الرسم الجزئي ثم تقليص الحجم واللصق في الصورة الأصلية",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "تغيير حجم الصورة إلى الدقة المستهدفة. ما لم يتطابق الطول والعرض ، ستحصل على نسبة عرض إلى ارتفاع غير صحيحة.",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "قم بتغيير حجم الصورة بحيث يتم ملء الدقة المستهدفة بالكامل بالصورة. يتم قطع المناطق التي تبرز.",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "قم بتغيير حجم الصورة بحيث تكون الصورة بأكملها داخل الدقة المستهدفة. املأ المساحة الفارغة بألوان الصورة.",
"How many times to repeat processing an image and using it as input for the next iteration": "كم مرة يتم تكرار معالجة صورة واستخدامها كمدخل للتكرار التالي",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "في وضع حلقة الاسترجاع ، يتم ضرب قوة تقليل الضوضاء بهذه القيمة في كل حلقة. يعني <1 تقليل التنوع بحيث يتقارب التسلسل الخاص بك على صورة ثابتة. > 1 يعني زيادة التنوع بحيث يصبح تسلسلك أكثر وأكثر فوضوية.",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "الترقية باستخدام الانتشار المستقر ، كم يجب أن يكون مقدار التداخل بالبكسل بين المربعات. تتداخل المربعات بحيث لا يكون هناك خط واضح للعيان عند دمجها مرة أخرى في صورة واحدة.",
"A directory on the same machine where the server is running.": "مجلد على نفس الجهاز حيث يتم تشغيل الخادم.",
"Leave blank to save images to the default path.": "اتركه فارغًا لحفظ الصور في المسار الافتراضي.",
"Result = A * (1 - M) + B * M": "النتيجة = A * (1 - M) + B * M",
"Result = A + (B - C) * M": "النتيجة = A + (B - C) * M",
"1st and last digit must be 1. ex:'1, 2, 1'": "يجب أن يكون الرقم الأول والأخير 1. على سبيل المثال: '1 ، 2 ، 1'",
"Path to directory with input images": "المسار إلى مجلد الصور المدخلة",
"Path to directory where to write outputs": "المسار إلى مجلد كتابة النتائج",
"Input images directory": "مجلد الصور المدخلة",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "استخدم العلامات التالية لتعريف كيفية اختيار أسماء الملفات للصور: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp] ؛ اتركه فارغا للوضع الافتراضي.",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "إذا تم تمكين هذا الخيار ، فلن تتم إضافة العلامة المائية إلى الصور التي تم إنشاؤها. تحذير: إذا لم تقم بإضافة علامة مائية ، فقد تكون تتصرف بطريقة غير أخلاقية.",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "استخدم العلامات التالية لتحديد كيفية اختيار المجادت الفرعية للصور و الإطارات الشبكية : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp] ؛ اتركه فارغا للوضع الافتراضي.",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "سيتم استخدام هذا التعبير العادي لاستخراج الكلمات من اسم الملف ، وسيتم ضمها باستخدام الخيار أدناه في نص التسمية المستخدم للتدريب. اتركه فارغًا للاحتفاظ بنص اسم الملف كما هو.",
"This string will be used to join split words into a single line if the option above is enabled.": "سيتم استخدام هذا النص لضم الكلمات المقسمة إلى سطر واحد إذا تم تمكين الخيار أعلاه.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "قائمة أسماء الإعدادات ، مفصولة بفواصل ، للإعدادات التي يجب أن تنتقل إلى شريط الوصول السريع في الأعلى ، بدلاً من علامة تبويب الإعداد المعتادة. انظر modules / shared.py للتعرف على أسماء الإعدادات. يتطلب إعادة التشغيل للتطبيق.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "إذا كانت هذه القيم غير صفرية ، فستتم إضافتها إلى البذرة واستخدامها لتهيئة فوضى الضوضاء عند استخدام أنظمة أخذ العينات باستخدام الوقت المتبقي. يمكنك استخدام ذلك لإنتاج المزيد من التنوعات في الصور ، أو يمكنك استخدامه لمطابقة صور البرامج الأخرى إذا كنت تعرف ما تفعله."
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "قائمة مقسمة بفواصل لأسماء الإعدادات التي يجب أن تظهر في الأعلى لتسهيل الوصول إليها، انظر إلى modules/shared.py لمعرفة الأسماء، يتطلب إعادة تشغيل",
"Localization (requires restart)": "اللغة (تتطلب إعادة تشغيل)",
"pt_BR": "البرتغالية",
"zh_CN": "الصينية",
"ko_KR": "الكورية",
"fr_FR": "الفرنسية",
"ru_RU": "الروسية",
"ar_AR": "العربية",
"tr_TR": "التركية",
"it_IT": "الإيطالية",
"ja_JP": "اليابانية",
"de_DE": "الألمانية",
"zh_TW": "الصينية (تايوان)",
"es_ES": "الإسبانية",
"Sampler parameters": "عوامل أساليب الخطو",
"Hide samplers in user interface (requires restart)": "اخف أساليب الخطو التالية (يتطلب إعادة تشغيل)",
"eta (noise multiplier) for DDIM": "العامل Eta η لأسلوب الخطو DDIM",
"eta (noise multiplier) for ancestral samplers": "العامل Eta η لأساليب الخطو السلفية (Ancestral)",
"img2img DDIM discretize": "طريقة التقطيع (Discretization) لأسلوب الخطو DDIM في وضع صورة إلى صورة",
"uniform": "خطية",
"quad": "تربيعية",
"sigma churn": "العشوائية (Schurn)",
"sigma tmin": "أدنى تشويش (Stmin)",
"sigma noise": "التشويش (Snoise)",
"Eta noise seed delta": "إزاحة بذرة أساليب الخطو التي تستعمل العامل Eta η",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "هذا الرقم سيتم إضافته إلى البذرة عند استخدام أحد أساليب الخطو التي تستعمل العامل Eta η، يفيد هذا الخيار في مشابهة نتائج بعض البرامج الأخرى التي تستعمله",
"Images Browser": "معرض الصور",
"Preload images at startup": "حمل الصور عند بدء التشغيل",
"Number of columns on the page": "عدد الأعمدة في كل صفحة",
"Number of rows on the page": "عدد الصفوف في كل صفحة",
"Minimum number of pages per load": "أقل عدد صور يتم تحميلها كل مرة",
"Request browser notifications": "اطلب تنبيهات المتصفح",
"Download localization template": "حمل ملف اللغة",
"Reload custom script bodies (No ui updates, No restart)": "أعد تحميل الأدوات الخاصة (بدون واجهة المستخدم ولا يحتاج إعادة تشغيل)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "أعد تشغيل gradio وتحميل الأدوات الخاصة وواجهة المستخدم",
"⤡": "⤡",
"⊞": "⊞",
"×": "×",
"": "",
"": "",
"•": "•",
"Label": "Label",
"File": "File",
"Image": "Image",
"Check progress": "Check progress",
"Check progress (first)": "Check progress (first)",
"Textbox": "Textbox",
"Image for img2img": "Image for img2img",
"Image for inpainting with mask": "Image for inpainting with mask",
"Mask": "Mask",
"Mask mode": "Mask mode",
"Masking mode": "Masking mode",
"Resize mode": "Resize mode",
"Prev batch": "Prev batch",
"Next batch": "Next batch",
"Refresh page": "Refresh page",
"Date to": "Date to",
"Number": "Number",
"set_index": "set_index",
"Checkbox": "Checkbox"
}
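
Editor's note: the two merger strings above, "Result = A * (1 - M) + B * M" (Weighted sum) and "Result = A + (B - C) * M" (Add difference), describe simple per-tensor arithmetic over checkpoint weights. The following is only a minimal illustrative sketch of those formulas, assuming plain torch state_dicts; the function names are hypothetical and this is not the repository's actual checkpoint-merger code.

import torch

def weighted_sum(a: torch.Tensor, b: torch.Tensor, m: float) -> torch.Tensor:
    # "Result = A * (1 - M) + B * M": linear interpolation between tensors A and B
    return a * (1.0 - m) + b * m

def add_difference(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, m: float) -> torch.Tensor:
    # "Result = A + (B - C) * M": add the (B - C) delta onto A, scaled by M
    return a + (b - c) * m

def merge_state_dicts(sd_a: dict, sd_b: dict, m: float = 0.5) -> dict:
    # Hypothetical helper: apply the weighted-sum rule to every key shared by
    # two checkpoint state_dicts.
    return {k: weighted_sum(sd_a[k], sd_b[k], m) for k in sd_a if k in sd_b}

At M = 0 the result equals model A, at M = 1 it equals model B, which is exactly what the "Multiplier (M) - set to 0 to get model A" string promises.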

458
localizations/de_DE.json Normal file
View File

@ -0,0 +1,458 @@
{
"⤡": "⤡",
"⊞": "⊞",
"×": "×",
"": "",
"": "",
"view": "API ",
"api": "anzeigen",
"•": " • ",
"built with gradio": "Mit Gradio erstellt",
"Loading...": "Lädt...",
"Stable Diffusion checkpoint": "Stable Diffusion Checkpoint",
"txt2img": "txt2img",
"img2img": "img2img",
"Extras": "Extras",
"PNG Info": "PNG Info",
"Checkpoint Merger": "Checkpoint Fusion",
"Train": "Trainieren",
"Settings": "Einstellungen",
"Prompt": "Prompt",
"Negative prompt": "Negative Prompt",
"Run": "Ausführen",
"Skip": "Überspringen",
"Interrupt": "Abbrechen",
"Generate": "Generieren",
"Style 1": "Stil 1",
"Style 2": "Stil 2",
"Label": "Bezeichnung",
"File": "Datei",
"Drop File Here": "Datei hier ablegen",
"-": "-",
"o": "oder",
"Click to Upload": "Hochladen",
"Image": "Bild",
"Check progress": "Fortschitt prüfen",
"Check progress (first)": "Fortschritt prüfen (Initial)",
"Sampling Steps": "Samplingschritte",
"Sampling method": "Samplingmethode",
"Euler a": "Euler a",
"Euler": "Euler",
"LMS": "LMS",
"Heun": "Heun",
"DPM2": "DPM2",
"DPM2 a": "DPM2 a",
"DPM fast": "DPM fast",
"DPM adaptive": "DPM adaptive",
"LMS Karras": "LMS Karras",
"DPM2 Karras": "DPM2 Karras",
"DPM2 a Karras": "DPM2 a Karras",
"DDIM": "DDIM",
"PLMS": "PLMS",
"Width": "Breite",
"Height": "Höhe",
"Restore faces": "Gesichter wiederherstellen",
"Tiling": "Kacheln",
"Highres. fix": "Highres. Fix",
"Firstpass width": "Breite Erstdurchlauf",
"Firstpass height": "Höhe Erstdurchlauf",
"Denoising strength": "Denoisingstärke",
"Batch count": "Batchanzahl",
"Batch size": "Batchgröße",
"CFG Scale": "CFG-Skala",
"Seed": "Seed",
"Extra": "Extra",
"Variation seed": "Variationsseed",
"Variation strength": "Variationsstärke",
"Resize seed from width": "Seed von Breite ändern",
"Resize seed from height": "Seed von Höhe ändern",
"Script": "Skript",
"None": "Nichts",
"Prompt matrix": "Promptmatrix",
"Prompts from file or textbox": "Prompts aus Datei oder Textfeld",
"X/Y plot": "X/Y Graph",
"Put variable parts at start of prompt": "Variable teile am start des Prompt setzen",
"Iterate seed every line": "Iterate seed every line",
"List of prompt inputs": "List of prompt inputs",
"Upload prompt inputs": "Upload prompt inputs",
"X type": "X-Typ",
"Nothing": "Nichts",
"Var. seed": "Var. seed",
"Var. strength": "Var. strength",
"Steps": "Schritte",
"Prompt S/R": "Prompt Suchen/Ersetzen",
"Prompt order": "Promptreihenfolge",
"Sampler": "Sampler",
"Checkpoint name": "Checkpointname",
"Hypernetwork": "Hypernetwork",
"Hypernet str.": "Hypernet str.",
"Sigma Churn": "Sigma Churn",
"Sigma min": "Sigma min",
"Sigma max": "Sigma max",
"Sigma noise": "Sigma noise",
"Eta": "Eta",
"Clip skip": "Clip skip",
"Denoising": "Denoising",
"Cond. Image Mask Weight": "Cond. Image Mask Weight",
"X values": "X-Werte",
"Y type": "Y-Typ",
"Y values": "Y-Werte",
"Draw legend": "Legende zeichnen",
"Include Separate Images": "Seperate Bilder hinzufügen",
"Keep -1 for seeds": "-1 als Seed behalten",
"Save": "Speichern",
"Send to img2img": "An img2img senden",
"Send to inpaint": "An Inpaint senden",
"Send to extras": "An Extras senden",
"Make Zip when Save?": "Zip beim Speichern erstellen?",
"Textbox": "Textfeld",
"Interrogate\nCLIP": "Interrogate\nCLIP",
"Interrogate\nDeepBooru": "Interrogate\nDeepBooru",
"Inpaint": "Inpaint",
"Batch img2img": "Batch img2img",
"Image for img2img": "Bild für img2img",
"Drop Image Here": "Bild hier ablegen",
"Image for inpainting with mask": "Bild für inpainting mit Maske",
"Mask": "Maske",
"Mask blur": "Maskenunschärfe",
"Mask mode": "Maskenmodus",
"Draw mask": "Maske zeichnen",
"Upload mask": "Maske hochladen",
"Masking mode": "Maskierungsmodus",
"Inpaint masked": "Maskiertes inpainten",
"Inpaint not masked": "Nicht maskiertes inpainten",
"Masked content": "Maskierter Inhalt",
"fill": "ausfüllen",
"original": "original",
"latent noise": "latent noise",
"latent nothing": "latent nothing",
"Inpaint at full resolution": "Inpaint mit voller Auflösung",
"Inpaint at full resolution padding, pixels": "Inpaint bei voller Auflösung Abstand, Pixel",
"Process images in a directory on the same machine where the server is running.": "Bilder in einem Verzeichnis auf demselben Rechner verarbeiten, auf dem der Server läuft.",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "Ein leeres Ausgabeverzeichnis verwenden, um Bilder normal zu speichern, anstatt in das Ausgabeverzeichnis zu schreiben.",
"Input directory": "Eingabeverzeichnis",
"Output directory": "Ausgabeverzeichnis",
"Resize mode": "Größenänderungsmodus",
"Just resize": "Nur Größe anpassen",
"Crop and resize": "Zuschneiden und Größe anpassen",
"Resize and fill": "Größe anpassen und ausfüllen",
"img2img alternative test": "img2img alternativer Test",
"Loopback": "Loopback",
"Outpainting mk2": "Outpainting mk2",
"Poor man's outpainting": "Poor man's outpainting",
"SD upscale": "SD-Upscale",
"should be 2 or lower.": "Sollte 2 oder niedriger sein.",
"Override `Sampling method` to Euler?(this method is built for it)": "`Samplingmethode` auf Euler setzen? (Diese Methode is dafür ausgelegt)",
"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "`Prompt` auf denselben Wert wie `Originale Prompt` (und `Negative Prompt`) setzen?",
"Original prompt": "Originale Prompt",
"Original negative prompt": "Originale negative Prompt",
"Override `Sampling Steps` to the same value as `Decode steps`?": "`Samplingschritte` auf denselben Wert wie `Dekodierschritte` setzen?",
"Decode steps": "Dekodierschritte",
"Override `Denoising strength` to 1?": "`Denoisingstärke auf 1 setzen?",
"Decode CFG scale": "CFG-Skala dekodieren",
"Randomness": "Zufälligkeit",
"Sigma adjustment for finding noise for image": "Sigma-Anpassung für die Suche nach Noise des Bildes",
"Loops": "Schleifen",
"Denoising strength change factor": "Denoising strength change factor",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Empfohlene Einstellungen: Samplingschritte: 80-100, Samplermethode: Euler a, Denoisingstärke: 0.8",
"Pixels to expand": "Pixel zum Erweitern",
"Outpainting direction": "Outpainting Richtung",
"left": "Links",
"right": "Rechts",
"up": "Hoch",
"down": "Runter",
"Fall-off exponent (lower=higher detail)": "Abfallexponent (niedriger=mehr Details)",
"Color variation": "Farbabweichung",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Skaliert das Bild auf die doppelte Größe; Benutze die Schieberegler für Breite und Höhe, um die Kachelgröße einzustellen",
"Tile overlap": "Kachelüberlappung",
"Upscaler": "Upscaler",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"SwinIR 4x": "SwinIR 4x",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
"ESRGAN_4x": "ESRGAN_4x",
"Single Image": "Einzelnes Bild",
"Batch Process": "Batchverarbeitung",
"Batch from Directory": "Batchverarbeitung aus Verzeichnis",
"Source": "Quelle",
"Show result images": "Bildergebnisse zeigen",
"Scale by": "Skalieren um",
"Scale to": "Skalieren zu",
"Resize": "Größe anpassen",
"Crop to fit": "Zuschneiden damit es passt",
"Upscaler 2 visibility": "Upscaler 2 Sichtbarkeit",
"GFPGAN visibility": "GFPGAN Sichtbarkeit",
"CodeFormer visibility": "CodeFormer Sichtbarkeit",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer Gewichtung (0 = maximale Wirkung, 1 = minimale Wirkung)",
"Upscale Before Restoring Faces": "Upscale Before Restoring Faces",
"Send to txt2img": "An txt2img senden",
"A merger of the two checkpoints will be generated in your": "Die zusammgeführten Checkpoints werden gespeichert unter",
"checkpoint": "Checkpoint",
"directory.": "Verzeichnis.",
"Primary model (A)": "Primäres Modell (A)",
"Secondary model (B)": "Sekundäres Modell (B)",
"Tertiary model (C)": "Tertiäres Modell (C)",
"Custom Name (Optional)": "Eigener Name (Optional)",
"Multiplier (M) - set to 0 to get model A": "Multiplikator (M) - auf 0 setzen, um Modell A zu erhalten",
"Interpolation Method": "Interpolationsmethode",
"Weighted sum": "Weighted sum",
"Add difference": "Add difference",
"Save as float16": "Speichern als float16",
"See": "Siehe ",
"wiki": "Wiki ",
"for detailed explanation.": "für eine ausführliche Erklärung.",
"Create embedding": "Embedding erstellen",
"Create hypernetwork": "Hypernetwork erstellen",
"Preprocess images": "Bilder vorbereiten",
"Name": "Name",
"Initialization text": "Initialisierungstext",
"Number of vectors per token": "Anzahl der Vektoren pro Token",
"Overwrite Old Embedding": "Alte Embeddings überschreiben",
"Modules": "Module",
"Enter hypernetwork layer structure": "Hypernetwork-Ebenenstruktur angeben",
"Select activation function of hypernetwork": "Aktivierungsfunktion des Hypernetwork auswählen",
"linear": "linear",
"relu": "relu",
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
"tanh": "tanh",
"sigmoid": "sigmoid",
"celu": "celu",
"gelu": "gelu",
"glu": "glu",
"hardshrink": "hardshrink",
"hardsigmoid": "hardsigmoid",
"hardtanh": "hardtanh",
"logsigmoid": "logsigmoid",
"logsoftmax": "logsoftmax",
"mish": "mish",
"prelu": "prelu",
"rrelu": "rrelu",
"relu6": "relu6",
"selu": "selu",
"silu": "silu",
"softmax": "softmax",
"softmax2d": "softmax2d",
"softmin": "softmin",
"softplus": "softplus",
"softshrink": "softshrink",
"softsign": "softsign",
"tanhshrink": "tanhshrink",
"threshold": "threshold",
"Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Auswahl der Initialisierung der Ebenengewichte. Empfohlen wird relu-like - Kaiming, sigmoid-like - Xavier",
"Normal": "Normal",
"KaimingUniform": "KaimingUniform",
"KaimingNormal": "KaimingNormal",
"XavierUniform": "XavierUniform",
"XavierNormal": "XavierNormal",
"Add layer normalization": "Ebenennormalisierung hinzufügen",
"Use dropout": "Dropout benutzen",
"Overwrite Old Hypernetwork": "Altes Hypernetwork überschreiben",
"Source directory": "Quellenverzeichnis",
"Destination directory": "Zielverzeichnis",
"Existing Caption txt Action": "Vorhandene Beschriftung der txt",
"ignore": "ignorieren",
"copy": "kopieren",
"prepend": "voranstellen",
"append": "anhängen",
"Create flipped copies": "Gespiegelte Bilder erstellen",
"Split oversized images": "Übergroße Bilder aufteilen",
"Auto focal point crop": "Automatisch auf Fokuspunkt zuschneiden",
"Use BLIP for caption": "BLIP für Beschriftung nutzen",
"Use deepbooru for caption": "Deepbooru für Beschriftung nutzen",
"Split image threshold": "Schwellenwert für die Aufteilung von Bildern",
"Split image overlap ratio": "Überschneidungsverhältnis der Teilbilder",
"Focal point face weight": "Fokuspunkt Gesicht Gewicht",
"Focal point entropy weight": "Fokuspunkt Entropie Gewicht",
"Focal point edges weight": "Fokuspunkt Kanten Gewicht",
"Create debug image": "Testbild erstellen",
"Preprocess": "Vorbereiten",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "Trainieren eines Embeddings oder eines Hypernetworks; Sie müssen ein Verzeichnis mit einem Satz von Bildern im Verhältnis 1:1 angeben",
"[wiki]": "[Wiki]",
"Embedding": "Embedding",
"Embedding Learning rate": "Embedding Lernrate",
"Hypernetwork Learning rate": "Hypernetwork Lernrate",
"Dataset directory": "Datensatzverzeichnis",
"Log directory": "Protokollverzeichnis",
"Prompt template file": "Prompt-Vorlagendatei",
"Max steps": "Max Schritte",
"Save an image to log directory every N steps, 0 to disable": "Speichere alle N Schritte ein Bild im Protokollverzeichnis, 0 zum Deaktivieren",
"Save a copy of embedding to log directory every N steps, 0 to disable": "Speichere alle N Schritte eine Embeddingkopie im Protokollverzeichnis, 0 zum Deaktivieren",
"Save images with embedding in PNG chunks": "Speichere Bilder mit Embeddings in PNG Chunks",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "Lese Parameter (Prompt, etc...) aus dem txt2img-Tab beim Erstellen von Vorschaubildern.",
"Train Hypernetwork": "Hypernetwork Trainieren",
"Train Embedding": "Embedding Trainieren",
"Apply settings": "Eintellungen anwenden",
"Saving images/grids": "Bilder/Raster speichern",
"Always save all generated images": "Immer alle generierten Bilder speichern",
"File format for images": "Dateiformat für Bilder",
"Images filename pattern": "Dateinamensmuster für Bilder",
"Add number to filename when saving": "Beim speichern, dem Dateinamen Nummer anhängen",
"Always save all generated image grids": "Immer alle generierten Bildraster speichern",
"File format for grids": "Dateiformat für Raster",
"Add extended info (seed, prompt) to filename when saving grid": "Beim Speichern von Rastern zusätzliche Information (Seed, Prompt) hinzufügen",
"Do not save grids consisting of one picture": "Keine Raster speichern, die nur aus einem Bild bestehen",
"Prevent empty spots in grid (when set to autodetect)": "Lücken im Raster verhindern (falls auf Auto-Erkennung gesetzt)",
"Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Rasterreihenanzahl; -1 für Auto-Erkennung und 0 für die gleiche wie die Batchanzahl",
"Save text information about generation parameters as chunks to png files": "Generationsparameter als Chunks in PNG-Dateien speichern",
"Create a text file next to every image with generation parameters.": "Erstelle zu jedem Bild eine Textdatei, die die Generationsparameter enthält",
"Save a copy of image before doing face restoration.": "Vor der Gesichtswiederhestellung eine Kopie des Bildes speichern",
"Quality for saved jpeg images": "Qualität der JPEG-Bilder",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Wenn ein PNG-Bild größer als 4MB oder die Dimensionen größer als 4000 ist, herunterskalieren und als JPG speichern.",
"Use original name for output filename during batch process in extras tab": "Orginale Dateinamen als Ausgabenamen bei der Batchverarbeitung im Extras-Tab verwenden",
"When using 'Save' button, only save a single selected image": "Bei der Benutzung des 'Speichern'-Knopfes, nur das ausgewählte Bild speichern",
"Do not add watermark to images": "Den Bildern kein Wasserzeichen hinzufügen",
"Paths for saving": "Pfade zum Speichern",
"Output directory for images; if empty, defaults to three directories below": "Ausgabeverzeichnis für Bilder; Falls leer, werden die Pfade unterhalb verwendet",
"Output directory for txt2img images": "Ausgabeverzeichnis für txt2img Bilder",
"Output directory for img2img images": "Ausgabeverzeichnis für img2img Bilder",
"Output directory for images from extras tab": "Ausgabeverzeichnis für Extras-Tab Bilder",
"Output directory for grids; if empty, defaults to two directories below": "Ausgabeverzeichnis für Raster; Falls leer, werden die Pfade unterhalb verwendet",
"Output directory for txt2img grids": "Ausgabeverzeichnis für txt2img Raster",
"Output directory for img2img grids": "Ausgabeverzeichnis für img2img Raster",
"Directory for saving images using the Save button": "Ausgabeverzeichnis für Bilder, die mit dem 'Speichern'-Knopf gespeichert wurden",
"Saving to a directory": "Speichern in ein Verzeichnis",
"Save images to a subdirectory": "Bilder in ein Unterverzeichnis speichern",
"Save grids to a subdirectory": "Raster in ein Unterverzeichnis speichern",
"When using \"Save\" button, save images to a subdirectory": "Bilder bei der Benutzung des 'Speichern'-Knopfes in ein Unterverzeichnis speichern",
"Directory name pattern": "Muster für Verzeichnisnamen",
"Max prompt words for [prompt_words] pattern": "Maximale Wortanzahl für [prompt_words] Muster",
"Upscaling": "Upscaling",
"Tile size for ESRGAN upscalers. 0 = no tiling.": "Kachelgröße für ESRGAN-Upscaler. 0 = keine Kacheln.",
"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Kachelüberlappung in Pixeln für ESRGAN-Upscaler. Niedrige Werte = sichtbare Naht.",
"Tile size for all SwinIR.": "Kachelgröße für alle SwinIR.",
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "Kachelüberlappung in Pixeln für SwinIR. Niedrige Werte = sichtbare Naht.",
"LDSR processing steps. Lower = faster": "LDSR-Verarbeitungsschritte. Niedriger = schneller",
"Upscaler for img2img": "Upscaler für img2img",
"Upscale latent space image when doing hires. fix": "Bild des Latent Space upscalen, wenn Highres. Fix benutzt wird",
"Face restoration": "Gesichtswiederhestellung",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer Gewichtung (0 = maximale Wirkung, 1 = minimale Wirkung)",
"Move face restoration model from VRAM into RAM after processing": "Verschiebe Gesichtswiederhestellung-Modell nach der Verarbeitung vom VRAM in den RAM",
"System": "System",
"VRAM usage polls per second during generation. Set to 0 to disable.": "VRAM-Nutzungsabfragen pro Sekunde während der Generierung. Zum Deaktivieren auf 0 setzen.",
"Always print all generation info to standard output": "Immer alle Generationsinformationen in der Standardausgabe ausgeben",
"Add a second progress bar to the console that shows progress for an entire job.": "Der Konsole einen zweiten Fortschrittsbalken hinzufügen, der den Fortschritt eines gesamten Auftrags anzeigt.",
"Training": "Training",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "VAE und CLIP während des Hypernetwork-Trainings in den RAM verschieben. Spart VRAM.",
"Filename word regex": "Filename word regex",
"Filename join string": "Filename join string",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Anzahl der Wiederholungen für ein einzelnes Eingabebild pro Epoche; wird nur für die Anzeige der Epochennummer verwendet",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Speichere eine csv-Datei, die den Verlust enthält, im Protokollverzeichnis alle N Schritte, 0 zum Deaktivieren",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "Checkpoints zum Zwischenspeichern im RAM",
"Hypernetwork strength": "Hypernetworkstärke",
"Inpainting conditioning mask strength": "Inpainting Stärke der Konditionierungsmaske",
"Apply color correction to img2img results to match original colors.": "Farbkorrektur auf die img2img-Ergebnisse anwenden, damit sie den Originalfarben entsprechen.",
"Save a copy of image before applying color correction to img2img results": "Vor dem Anwenden der Farbkorrektur eine Kopie des Bildes speichern",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Mit img2img, die exakte Anzahl der Schritte ausführen, die vom Schieberegler angegeben sind (normalerweise weniger bei weniger Denoising).",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Aktivieren der Quantisierung in K-Samplern für schärfere und sauberere Ergebnisse. Dies kann bestehende Seeds verändern. Erfordert Neustart.",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Hervorhebung: Verwenden Sie (Text), damit das Modell dem Text mehr Aufmerksamkeit schenkt, und [Text], damit es ihm weniger Aufmerksamkeit schenkt",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "Verwenden der alten Implementierung von Hervorhebungen. Kann nützlich sein, um alte Seeds zu reproduzieren.",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "K-Diffusions-Sampler erzeugen in einem Batch die gleichen Bilder, wie bei der Erstellung eines einzelnen Bildes",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "Erhöhung der Kohärenz durch Auffüllen ab dem letzten Komma innerhalb von n Token, wenn mehr als 75 Token verwendet werden",
"Filter NSFW content": "NSFW-Inhalte filtern",
"Stop At last layers of CLIP model": "Stoppe bei den letzten Schichten des CLIP-Modells",
"Interrogate Options": "Interrogate Optionen",
"Interrogate: keep models in VRAM": "Interrogate: Modelle im VRAM behalten",
"Interrogate: use artists from artists.csv": "Interrogate: Künstler aus 'artists.csv' nutzen",
"Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Interrogate: Die Rangfolge von Modell-Tags in den Ergebnissen einschließen (hat keine Auswirkung auf beschriftungsbasierte Interrogator).",
"Interrogate: num_beams for BLIP": "Interrogate: num_beams für BLIP",
"Interrogate: minimum description length (excluding artists, etc..)": "Interrogate: minimale Beschreibungslänge (Künstler, etc.. ausgenommen)",
"Interrogate: maximum description length": "Interrogate: maximale Beschreibungslänge",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: maximale Anzahl an Zeilen in Textdatei (0 = Kein Limit)",
"Interrogate: deepbooru score threshold": "Interrogate: Deepbooru minimale Punkteanzahl",
"Interrogate: deepbooru sort alphabetically": "Interrogate: Sortiere Deepbooru alphabetisch",
"use spaces for tags in deepbooru": "Benutze Leerzeichen für Deepbooru-Tags",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "Escape-Klammern (\\) in Deepbooru (damit sie als normale Klammern und nicht zur Hervorhebung verwendet werden)",
"User interface": "Benutzeroberfläche",
"Show progressbar": "Fortschrittsleiste anzeigen",
"Show image creation progress every N sampling steps. Set 0 to disable.": "Zeige eine Bildvorschau alle N Samplingschritte. Zum Deaktivieren auf 0 setzen.",
"Show previews of all images generated in a batch as a grid": "Zeige eine Vorschau aller erzeugten Bilder in einem Batch als Raster",
"Show grid in results for web": "Zeige Raster in der Web-UI Vorschau",
"Do not show any images in results for web": "Keine Bilder in der Web-UI Vorschau zeigen",
"Add model hash to generation information": "Hash des Modells zu den Generationsinformationen hinzufügen",
"Add model name to generation information": "Name des Modells zu den Generationsinformationen hinzufügen",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Beim Einlesen von Generierungsparametern aus Text in die Benutzeroberfläche (aus PNG-Info oder eingefügtem Text) wird das ausgewählte Modell/Checkpoint nicht geändert.",
"Send seed when sending prompt or image to other interface": "Den Seed, beim Senden des Bildes/Prompt zu einem anderen Tab, mitsenden",
"Font for image grids that have text": "Schriftart für Bildraster mit Text",
"Enable full page image viewer": "Ganzseitenbildbetrachter aktivieren",
"Show images zoomed in by default in full page image viewer": "Standardmäßig Bilder im Ganzseitenbildbetrachter vergrößert anzeigen",
"Show generation progress in window title.": "Generationsfortschritt im Fenstertitel anzeigen.",
"Quicksettings list": "Schnellzugriffsleiste",
"Localization (requires restart)": "Lokalisierung (Erfordert Neustart)",
"Sampler parameters": "Samplerparameter",
"Hide samplers in user interface (requires restart)": "Sampler in der Benutzeroberfläche verstecken (Erfordert Neustart)",
"eta (noise multiplier) for DDIM": "Eta (noise Multiplikator) für DDIM",
"eta (noise multiplier) for ancestral samplers": "Eta (noise Multiplikator) für Ancestral Sampler",
"img2img DDIM discretize": "img2img DDIM diskretisieren",
"uniform": "uniform",
"quad": "quad",
"sigma churn": "sigma churn",
"sigma tmin": "sigma tmin",
"sigma noise": "sigma noise",
"Eta noise seed delta": "Eta noise seed delta",
"Request browser notifications": "Browserbenachrichtigungen anfordern",
"Download localization template": "Vorlage für Lokalisierung herunterladen",
"Reload custom script bodies (No ui updates, No restart)": "Benutzerdefinierte Skripte neu laden (keine Aktualisierung der Benutzeroberfläche, kein Neustart)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Gradio neu starten und Komponenten aktualisieren (nur Custom Scripts, ui.py, js und css)",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (zum Erzeugen Strg+Eingabe oder Alt+Eingabe drücken)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Negative Prompt (zum Erzeugen Strg+Eingabe oder Alt+Eingabe drücken)",
"Add a random artist to the prompt.": "Zufälligen Künstler der Prompt hinzufügen.",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Lesen der Generationsparameter aus der Prompt oder der letzten Generation (wenn Prompt leer ist) in die Benutzeroberfläche.",
"Save style": "Stil speichern",
"Apply selected styles to current prompt": "Momentan ausgewählte Stile auf die Prompt anwenden",
"Stop processing current image and continue processing.": "Verarbeitung des momentanen Bildes abbrechen und Verarbeitung fortsetzen.",
"Stop processing images and return any results accumulated so far.": "Verarbeitung abbrechen und alle bisherigen Ergebnisse ausgeben.",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "Stil, der angwendet werden soll. Stile haben sowohl positive als auch negative Promptanteile und werden auf beide angewandt.",
"Do not do anything special": "Nichts besonderes machen",
"Which algorithm to use to produce the image": "Der zu benutzende Algorithmus für die Bildgeneration",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - sehr kreativ, kann sehr unterschiedliche Bilder in Abhängigkeit von der Schrittanzahl bekommen. Werte höher als 30-40 helfen nicht.",
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Modelle - am besten für inpainting",
"Produce an image that can be tiled.": "Bild erzeugen, dass gekachelt werden kann.",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Verwendung eines zweistufigen Prozesses, um ein Bild mit geringerer Auflösung zu erstellen, hochzuskalieren und dann die Details zu verbessern, ohne die Komposition zu verändern.",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Bestimmt, wie wenig Bezug der Algorithmus zu dem Inhalt des Bildes haben soll. Bei 0 ändert sich nichts, und bei 1 besitzt das Bild keinen Bezug. Bei Werten unter 1,0 erfolgt die Verarbeitung in weniger Schritten, als der Schieberegler angibt.",
"How many batches of images to create": "Wie viele Sätze von Bildern erstellt werden sollen",
"How many image to create in a single batch": "Wie viele Bilder in einem Batch erstellt werden sollen",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - wie stark das Bild der Prompt entsprechen soll - niedrigere Werte führen zu kreativeren Ergebnissen",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Ein Wert, der die Ausgabe des Zufallszahlengenerators bestimmt: Wenn ein Bild mit denselben Parametern und demselben Seed wie ein anderes Bild erstellt wird, erhält man dasselbe Ergebnis.",
"Set seed to -1, which will cause a new random number to be used every time": "Seed auf -1 setzen, so dass jedes Mal eine neue Zufallszahl verwendet wird",
"Reuse seed from last generation, mostly useful if it was randomed": "Wiederverwendung des Seeds der letzten Generation, meist nützlich, wenn er zufällig gewählt wurde",
"Seed of a different picture to be mixed into the generation.": "Seed eines anderen Bildes, der bei der Erzeugung reingemischt wird.",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Wie stark die Veränderung sein soll. Bei 0 gibt es keinen Effekt. Bei 1 erhält man das vollständige Bild mit dem Variationsseed (außer bei Ancestral Samplern, wie Euler A, wo man nur etwas erhält).",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Versuche ein Bild zu erzeugen, das dem ähnelt, das mit dem Seed bei der angegebenen Auflösung erzeugt worden wäre.",
"Separate values for X axis using commas.": "Trenne die Werte für die X-Achse durch Kommas.",
"Separate values for Y axis using commas.": "Trenne die Werte für die Y-Achse durch Kommas.",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "Bild in ein Verzeichnis (Standard - log/images) und Generationsparameter in eine csv-Datei schreiben.",
"Open images output directory": "Ausgabeverzeichnis öffnen",
"How much to blur the mask before processing, in pixels.": "Wie stark die Maske vor der Verarbeitung weichgezeichnet werden soll, in Pixeln.",
"What to put inside the masked area before processing it with Stable Diffusion.": "Was in den maskierten Bereich vor der Verarbeitung mit Stable Diffusion soll.",
"fill it with colors of the image": "Füllen mit den Farben des Bildes",
"keep whatever was there originally": "Originalen Inhalt behalten",
"fill it with latent space noise": "Füllen mit latent space noise",
"fill it with latent space zeroes": "Füllen mit latent space Nullen",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Hochskalieren des maskierten Bereichs auf die Zielauflösung, Inpainting, Zurückskalieren und Einfügen in das Originalbild.",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Die Größe des Bildes auf die gewünschte Auflösung ändern. Wenn Höhe und Breite nicht übereinstimmen, erhält man ein falsches Seitenverhältnis.",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Die Größe des Bildes so ändern, dass die gesamte Zielauflösung mit dem Bild ausgefüllt wird. Herausragende Teile werden abgeschnitten.",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Die Größe des Bildes so ändern, dass das gesamte Bild enthalten ist. Lücken werden mit Farben des Bildes ausgefüllt.",
"How many times to repeat processing an image and using it as input for the next iteration": "Wie oft die Verarbeitung eines Bildes wiederholt und als Eingabe für die nächste Iteration verwendet werden soll",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "Im Loopback-Modus wird die Denoisingstärke in jeder Schleife mit diesem Wert multipliziert. <1 bedeutet abnehmende Vielfalt, so dass die Sequenz zu einem festen Bild konvergiert. >1 bedeutet zunehmende Vielfalt, so dass die Sequenz immer chaotischer wird.",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Wie viel Pixel sich beim SD-Upscale zwischen den Kacheln überlappen. Die Kacheln überlappen sich so, dass beim Zusammenfügen zu einem Bild keine deutlich sichtbare Naht entsteht.",
"A directory on the same machine where the server is running.": "Ein Verzeichnis auf demselben Rechner, auf dem der Server läuft.",
"Leave blank to save images to the default path.": "Leer lassen, um Bilder im Standardpfad zu speichern.",
"Result = A * (1 - M) + B * M": "Ergebnis = A * (1 - M) + B * M",
"Result = A + (B - C) * M": "Ergebnis = A + (B - C) * M",
"1st and last digit must be 1. ex:'1, 2, 1'": "Erste und letzte Ziffer müssen 1 sein. Bspl:'1, 2, 1'",
"Path to directory with input images": "Pfad zum Verzeichnis mit den Eingabebildern",
"Path to directory where to write outputs": "Pfad zum Verzeichnis, wo die Ausgaben gespeichert werden",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "Verwende die folgenden Tags, um festzulegen, wie die Dateinamen für Bilder ausgewählt werden: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leer lassen, um Standardwerte zu verwenden.",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "Wenn diese Option aktiviert ist, wird den erstellten Bildern kein Wasserzeichen hinzugefügt. Achtung: Wenn Sie kein Wasserzeichen hinzufügen, verhalten Sie sich möglicherweise unethisch.",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "Verwenden Sie die folgenden Tags, um festzulegen, wie Unterverzeichnisse für Bilder und Raster ausgewählt werden: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leer lassen, um Standardwerte zu verwenden.",
"Restore low quality faces using GFPGAN neural network": "Wiederherstellung von Gesichtern schlechter Qualität mit dem neuralen Netzwerk GFPGAN",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Dieser reguläre Ausdruck wird verwendet, um Wörter aus dem Dateinamen zu extrahieren, die dann mit der unten stehenden Option zu einem Beschriftungstext für das Training zusammengefügt werden. Leer lassen, um den Text des Dateinamens so zu belassen, wie er ist.",
"This string will be used to join split words into a single line if the option above is enabled.": "Diese Zeichenfolge wird verwendet, um getrennte Wörter in einer einzigen Zeile zu verbinden, wenn die obige Option aktiviert ist.",
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Gilt nur für Inpainting-Modelle. Legt fest, wie stark das Originalbild für Inpainting und img2img maskiert werden soll. 1.0 bedeutet vollständig maskiert, was das Standardverhalten ist. 0.0 bedeutet eine vollständig unmaskierte Konditionierung. Niedrigere Werte tragen dazu bei, die Gesamtkomposition des Bildes zu erhalten, sind aber bei großen Änderungen problematisch.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Liste von Einstellungsnamen, getrennt durch Kommas, für Einstellungen, die in der Schnellzugriffsleiste oben erscheinen sollen, anstatt in dem üblichen Einstellungs-Tab. Siehe modules/shared.py für Einstellungsnamen. Erfordert einen Neustart zur Anwendung.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Wenn dieser Wert ungleich Null ist, wird er zum Seed addiert und zur Initialisierung des RNG für Noise bei der Verwendung von Samplern mit Eta verwendet. Dies kann verwendet werden, um noch mehr Variationen von Bildern zu erzeugen, oder um Bilder von anderer Software zu erzeugen, wenn Sie wissen, was Sie tun."
}
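
Editor's note: the last string in this file describes the "Eta noise seed delta" setting: a non-zero value is added to the seed before initializing the RNG used for sampler noise. Below is a minimal sketch of that idea, assuming a torch CPU generator; the function name is illustrative and this is not the webui's actual implementation.

import torch

def sampler_noise_rng(seed: int, eta_noise_seed_delta: int = 0) -> torch.Generator:
    # The delta is simply added to the image seed before seeding the RNG that
    # produces the extra noise consumed by eta/ancestral samplers.
    gen = torch.Generator()
    gen.manual_seed(seed + eta_noise_seed_delta)
    return gen

# Example: same image seed, two different delta values -> two different noise streams.
noise_a = torch.randn((4, 64, 64), generator=sampler_noise_rng(1234))
noise_b = torch.randn((4, 64, 64), generator=sampler_noise_rng(1234, eta_noise_seed_delta=31337))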

1217
localizations/it_IT.json Normal file

File diff suppressed because it is too large

localizations/ko_KR.json
View File

@ -9,11 +9,13 @@
" images in this directory. Loaded ": "개의 이미지가 이 경로에 존재합니다. ",
" pages": "페이지로 나뉘어 표시합니다.",
", divided into ": "입니다. ",
". Use Installed tab to restart.": "에 성공적으로 설치하였습니다. 설치된 확장기능 탭에서 UI를 재시작해주세요.",
"1st and last digit must be 1. ex:'1, 2, 1'": "1st and last digit must be 1. ex:'1, 2, 1'",
"[wiki]": " [위키] 참조",
"A directory on the same machine where the server is running.": "WebUI 서버가 돌아가고 있는 디바이스에 존재하는 디렉토리를 선택해 주세요.",
"A merger of the two checkpoints will be generated in your": "체크포인트들이 병합된 결과물이 당신의",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "난수 생성기의 결과물을 지정하는 값 - 동일한 설정값과 동일한 시드를 적용 시, 완전히 똑같은 결과물을 얻게 됩니다.",
"Action": "작업",
"Add a random artist to the prompt.": "프롬프트에 랜덤한 작가 추가",
"Add a second progress bar to the console that shows progress for an entire job.": "콘솔에 전체 작업의 진행도를 보여주는 2번째 프로그레스 바 추가하기",
"Add difference": "차이점 추가",
@ -22,6 +24,8 @@
"Add model hash to generation information": "생성 정보에 모델 해시 추가",
"Add model name to generation information": "생성 정보에 모델 이름 추가",
"Add number to filename when saving": "이미지를 저장할 때 파일명에 숫자 추가하기",
"Aesthetic Gradients": "스타일 그라디언트",
"Aesthetic Image Scorer": "스타일 이미지 스코어러",
"Aesthetic imgs embedding": "스타일 이미지 임베딩",
"Aesthetic learning rate": "스타일 학습 수",
"Aesthetic steps": "스타일 스텝 수",
@ -33,22 +37,31 @@
"Always save all generated images": "생성된 이미지 항상 저장하기",
"api": "",
"append": "뒤에 삽입",
"Append commas": "쉼표 삽입",
"Apply and restart UI": "적용 후 UI 재시작",
"Apply color correction to img2img results to match original colors.": "이미지→이미지 결과물이 기존 색상과 일치하도록 색상 보정 적용하기",
"Apply selected styles to current prompt": "현재 프롬프트에 선택된 스타일 적용",
"Apply settings": "설정 적용하기",
"Artists to study": "연구할만한 작가들",
"Auto focal point crop": "초점 기준 크롭(자동 감지)",
"Autocomplete options": "자동완성 설정",
"Available": "지원되는 확장기능 목록",
"Batch count": "배치 수",
"Batch from Directory": "저장 경로로부터 여러장 처리",
"Batch img2img": "이미지→이미지 배치",
"Batch Process": "이미지 여러장 처리",
"Batch size": "배치 크기",
"behind": "최신 아님",
"BSRGAN 4x": "BSRGAN 4x",
"built with gradio": "gradio로 제작되었습니다",
"Calculates aesthetic score for generated images using CLIP+MLP Aesthetic Score Predictor based on Chad Scorer": "Chad 스코어러를 기반으로 한 CLIP+MLP 스타일 점수 예측기를 이용해 생성된 이미지의 스타일 점수를 계산합니다.",
"Cancel generate forever": "반복 생성 취소",
"cfg cnt": "CFG 변화 횟수",
"cfg count": "CFG 변화 횟수",
"CFG Scale": "CFG 스케일",
"cfg1 min/max": "CFG1 최소/최대",
"cfg2 min/max": "CFG2 최소/최대",
"Check for updates": "업데이트 확인",
"Check progress": "진행도 체크",
"Check progress (first)": "진행도 체크 (처음)",
"checkpoint": " 체크포인트 ",
@ -64,10 +77,14 @@
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 가중치 설정값 (0 = 최대 효과, 1 = 최소 효과)",
"Collect": "즐겨찾기",
"Color variation": "색깔 다양성",
"Combinations": "조합",
"Combinatorial batches": "조합 배치 수",
"Combinatorial generation": "조합 생성",
"copy": "복사",
"Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows": "서로 다른 설정값으로 생성된 이미지의 그리드를 만듭니다. 아래의 설정으로 가로/세로에 어떤 설정값을 적용할지 선택하세요.",
"Create a text file next to every image with generation parameters.": "생성된 이미지마다 생성 설정값을 담은 텍스트 파일 생성하기",
"Create aesthetic images embedding": "스타일 이미지 임베딩 생성하기",
"Create an embedding from one or few pictures and use it to apply their style to generated images.": "하나 혹은 그 이상의 이미지들로부터 임베딩을 생성해, 그 이미지들의 스타일을 다른 이미지 생성 시 적용할 수 있게 해줍니다.",
"Create debug image": "디버그 이미지 생성",
"Create embedding": "임베딩 생성",
"Create flipped copies": "좌우로 뒤집은 복사본 생성",
@ -78,14 +95,18 @@
"custom fold": "커스텀 경로",
"Custom Name (Optional)": "병합 모델 이름 (선택사항)",
"Dataset directory": "데이터셋 경로",
"Dataset Tag Editor": "데이터셋 태그 편집기",
"date": "생성 일자",
"DDIM": "DDIM",
"Decode CFG scale": "디코딩 CFG 스케일",
"Decode steps": "디코딩 스텝 수",
"Delete": "삭제",
"delete next": "선택한 이미지부터 시작해서 삭제할 이미지 갯수",
"Denoising": "디노이징",
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - 인페이팅에 뛰어남",
"Denoising strength": "디노이즈 강도",
"Denoising strength change factor": "디노이즈 강도 변경 배수",
"Description": "설명",
"Destination directory": "결과물 저장 경로",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "알고리즘이 얼마나 원본 이미지를 반영할지를 결정하는 수치입니다. 0일 경우 아무것도 바뀌지 않고, 1일 경우 원본 이미지와 전혀 관련없는 결과물을 얻게 됩니다. 1.0 아래의 값일 경우, 설정된 샘플링 스텝 수보다 적은 스텝 수를 거치게 됩니다.",
"Directory for saving images using the Save button": "저장 버튼을 이용해 저장하는 이미지들의 저장 경로",
@ -108,6 +129,8 @@
"Draw mask": "마스크 직접 그리기",
"Drop File Here": "파일을 끌어 놓으세요",
"Drop Image Here": "이미지를 끌어 놓으세요",
"Dropdown": "드롭다운",
"Dynamic Prompts": "다이나믹 프롬프트",
"Embedding": "임베딩",
"Embedding Learning rate": "임베딩 학습률",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "강조 : (텍스트)를 이용해 모델의 텍스트에 대한 가중치를 더 강하게 주고 [텍스트]를 이용해 더 약하게 줍니다.",
@ -127,6 +150,9 @@
"Euler a": "Euler a",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 매우 창의적, 스텝 수에 따라 완전히 다른 결과물이 나올 수 있음. 30~40보다 높은 스텝 수는 효과가 미미함",
"Existing Caption txt Action": "이미 존재하는 캡션 텍스트 처리",
"Extension": "확장기능",
"Extension index URL": "확장기능 목록 URL",
"Extensions": "확장기능",
"Extra": "고급",
"Extras": "부가기능",
"extras": "부가기능",
@ -134,7 +160,7 @@
"Face restoration": "얼굴 보정",
"Face restoration model": "얼굴 보정 모델",
"Fall-off exponent (lower=higher detail)": "감쇠 지수 (낮을수록 디테일이 올라감)",
"favorites": "즐겨찾기",
"Favorites": "즐겨찾기",
"File": "파일",
"File format for grids": "그리드 이미지 파일 형식",
"File format for images": "이미지 파일 형식",
@ -150,6 +176,7 @@
"First Page": "처음 페이지",
"Firstpass height": "초기 세로길이",
"Firstpass width": "초기 가로길이",
"Fixed seed": "시드 고정",
"Focal point edges weight": "경계면 가중치",
"Focal point entropy weight": "엔트로피 가중치",
"Focal point face weight": "얼굴 가중치",
@ -184,8 +211,10 @@
"ignore": "무시",
"Image": "이미지",
"Image Browser": "이미지 브라우저",
"Image browser": "이미지 브라우저",
"Image for img2img": "Image for img2img",
"Image for inpainting with mask": "마스크로 인페인팅할 이미지",
"Image not found (may have been already moved)": "이미지를 찾을 수 없습니다 (이미 옮겨졌을 수 있음)",
"Images Browser": "이미지 브라우저",
"Images directory": "이미지 경로",
"Images filename pattern": "이미지 파일명 패턴",
@ -193,6 +222,7 @@
"img2img alternative test": "이미지→이미지 대체버전 테스트",
"img2img DDIM discretize": "이미지→이미지 DDIM 이산화",
"img2img history": "이미지→이미지 기록",
"Implements an expressive template language for random or combinatorial prompt generation along with features to support deep wildcard directory structures.": "무작위/조합 프롬프트 생성을 위한 문법과 복잡한 와일드카드 구조를 지원합니다.",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "루프백 모드에서는 매 루프마다 디노이즈 강도에 이 값이 곱해집니다. 1보다 작을 경우 다양성이 낮아져 결과 이미지들이 고정된 형태로 모일 겁니다. 1보다 클 경우 다양성이 높아져 결과 이미지들이 갈수록 혼란스러워지겠죠.",
"Include Separate Images": "분리된 이미지 포함하기",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "75개보다 많은 토큰을 사용시 마지막 쉼표로부터 N개의 토큰 이내에 패딩을 추가해 통일성 증가시키기",
@ -202,8 +232,14 @@
"Inpaint at full resolution padding, pixels": "전체 해상도로 인페인트시 패딩값(픽셀 단위)",
"Inpaint masked": "마스크만 처리",
"Inpaint not masked": "마스크 이외만 처리",
"Inpainting conditioning mask strength": "인페인팅 조절 마스크 강도",
"Input directory": "인풋 이미지 경로",
"Input images directory": "이미지 경로 입력",
"Inspiration": "\"영감\"",
"Install": "설치",
"Install from URL": "URL로부터 확장기능 설치",
"Installed": "설치된 확장기능",
"Installed into ": "확장기능을 ",
"Interpolation Method": "보간 방법",
"Interrogate\nCLIP": "CLIP\n분석",
"Interrogate\nDeepBooru": "DeepBooru\n분석",
@ -218,9 +254,11 @@
"Interrogate: use artists from artists.csv": "분석 : artists.csv의 작가들 사용하기",
"Interrupt": "중단",
"Is negative text": "네거티브 텍스트일시 체크",
"Iterate seed every line": "줄마다 시드 반복하기",
"Just resize": "리사이징",
"Keep -1 for seeds": "시드값 -1로 유지",
"keep whatever was there originally": "이미지 원본 유지",
"keyword": "프롬프트",
"Label": "라벨",
"Lanczos": "Lanczos",
"Last prompt:": "마지막 프롬프트 : ",
@ -228,22 +266,29 @@
"Last saved image:": "마지막으로 저장된 이미지 : ",
"latent noise": "잠재 노이즈",
"latent nothing": "잠재 공백",
"latest": "최신 버전",
"LDSR": "LDSR",
"LDSR processing steps. Lower = faster": "LDSR 스텝 수. 낮은 값 = 빠른 속도",
"leakyrelu": "leakyrelu",
"Leave blank to save images to the default path.": "기존 저장 경로에 이미지들을 저장하려면 비워두세요.",
"Leave empty for auto": "자동 설정하려면 비워두십시오",
"left": "왼쪽",
"Lets you edit captions in training datasets.": "훈련에 사용되는 데이터셋의 캡션을 수정할 수 있게 해줍니다.",
"linear": "linear",
"List of prompt inputs": "프롬프트 입력 리스트",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "설정 탭이 아니라 상단의 빠른 설정 바에 위치시킬 설정 이름을 쉼표로 분리해서 입력하십시오. 설정 이름은 modules/shared.py에서 찾을 수 있습니다. 재시작이 필요합니다.",
"LMS": "LMS",
"LMS Karras": "LMS Karras",
"Load": "불러오기",
"Load from:": "URL로부터 불러오기",
"Loading...": "로딩 중...",
"Local directory name": "로컬 경로 이름",
"Localization (requires restart)": "현지화 (재시작 필요)",
"Log directory": "로그 경로",
"Loopback": "루프백",
"Loops": "루프 수",
"Loss:": "손실(Loss) : ",
"Magic prompt": "매직 프롬프트",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "동일한 시드 값으로 생성되었을 이미지를 주어진 해상도로 최대한 유사하게 재현합니다.",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "K-diffusion 샘플러들이 단일 이미지를 생성하는 것처럼 배치에서도 동일한 이미지를 생성하게 하기",
"Make Zip when Save?": "저장 시 Zip 생성하기",
@ -257,11 +302,13 @@
"Minimum number of pages per load": "한번 불러올 때마다 불러올 최소 페이지 수",
"Modules": "모듈",
"Move face restoration model from VRAM into RAM after processing": "처리가 완료되면 얼굴 보정 모델을 VRAM에서 RAM으로 옮기기",
"Move to favorites": "즐겨찾기로 옮기기",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "하이퍼네트워크 훈련 진행 시 VAE와 CLIP을 RAM으로 옮기기. VRAM이 절약됩니다.",
"Moved to favorites": "즐겨찾기로 옮겨짐",
"Multiplier (M) - set to 0 to get model A": "배율 (M) - 0으로 적용하면 모델 A를 얻게 됩니다",
"Name": "이름",
"Negative prompt": "네거티브 프롬프트",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "네거티브 프롬프트 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "네거티브 프롬프트(Prompt) 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)",
"Next batch": "다음 묶음",
"Next Page": "다음 페이지",
"None": "없음",
@ -274,6 +321,7 @@
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "세대(Epoch)당 단일 인풋 이미지의 반복 횟수 - 세대(Epoch) 숫자를 표시하는 데에만 사용됩니다. ",
"Number of rows on the page": "각 페이지마다 표시할 세로줄 수",
"Number of vectors per token": "토큰별 벡터 수",
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "인페인팅 모델에만 적용됩니다. 인페인팅과 이미지→이미지에서 원본 이미지를 얼마나 마스킹 처리할지 결정하는 값입니다. 1.0은 완전히 마스킹함(기본 설정)을 의미하고, 0.0은 완전히 언마스킹된 이미지를 의미합니다. 낮은 값일수록 이미지의 전체적인 구성을 유지하는 데에 도움되겠지만, 변화량이 많을수록 불안정해집니다.",
"Open for Clip Aesthetic!": "클립 스타일 기능을 활성화하려면 클릭!",
"Open images output directory": "이미지 저장 경로 열기",
"Open output directory": "저장 경로 열기",
@ -281,6 +329,7 @@
"original": "원본 유지",
"Original negative prompt": "기존 네거티브 프롬프트",
"Original prompt": "기존 프롬프트",
"Others": "기타",
"Outpainting direction": "아웃페인팅 방향",
"Outpainting mk2": "아웃페인팅 마크 2",
"Output directory": "이미지 저장 경로",
@ -299,6 +348,7 @@
"Overwrite Old Hypernetwork": "기존 하이퍼네트워크 덮어쓰기",
"Page Index": "페이지 인덱스",
"parameters": "설정값",
"path name": "경로 이름",
"Path to directory where to write outputs": "결과물을 출력할 경로",
"Path to directory with input images": "인풋 이미지가 있는 경로",
"Paths for saving": "저장 경로",
@ -319,13 +369,14 @@
"Process images in a directory on the same machine where the server is running.": "WebUI 서버가 돌아가고 있는 디바이스에 존재하는 디렉토리의 이미지들을 처리합니다.",
"Produce an image that can be tiled.": "타일링 가능한 이미지를 생성합니다.",
"Prompt": "프롬프트",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "프롬프트 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "프롬프트(Prompt) 입력(Ctrl+Enter나 Alt+Enter로 생성 시작)",
"Prompt matrix": "프롬프트 매트릭스",
"Prompt order": "프롬프트 순서",
"Prompt S/R": "프롬프트 스타일 변경",
"Prompt template file": "프롬프트 템플릿 파일 경로",
"Prompts": "프롬프트",
"Prompts from file or textbox": "파일이나 텍스트박스로부터 프롬프트 불러오기",
"Provides an interface to browse created images in the web browser.": "생성된 이미지를 브라우저 내에서 볼 수 있는 인터페이스를 추가합니다.",
"Put variable parts at start of prompt": "변경되는 프롬프트를 앞에 위치시키기",
"quad": "quad",
"Quality for saved jpeg images": "저장된 jpeg 이미지들의 품질",
@ -333,11 +384,13 @@
"R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B",
"Random": "랜덤",
"Random grid": "랜덤 그리드",
"Randomly display the pictures of the artist's or artistic genres typical style, more pictures of this artist or genre is displayed after selecting. So you don't have to worry about how hard it is to choose the right style of art when you create.": "특정 작가 또는 스타일의 이미지들 중 하나를 무작위로 보여줍니다. 선택 후 선택한 작가 또는 스타일의 이미지들이 더 나타나게 됩니다. 고르기 어려워도 걱정하실 필요 없어요!",
"Randomness": "랜덤성",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "클립보드에 복사된 정보로부터 설정값 읽어오기/프롬프트창이 비어있을경우 제일 최근 설정값 불러오기",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "프리뷰 이미지 생성 시 텍스트→이미지 탭에서 설정값(프롬프트 등) 읽어오기",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "추천 설정값 - 샘플링 스텝 수 : 80-100 , 샘플러 : Euler a, 디노이즈 강도 : 0.8",
"Reload custom script bodies (No ui updates, No restart)": "커스텀 스크립트 리로드하기(UI 업데이트 없음, 재시작 없음)",
"Reloading...": "재시작 중...",
"relu": "relu",
"Renew Page": "Renew Page",
"Request browser notifications": "브라우저 알림 권한 요청",
@ -357,6 +410,7 @@
"Reuse seed from last generation, mostly useful if it was randomed": "이전 생성에서 사용된 시드를 불러옵니다. 랜덤하게 생성했을 시 도움됨",
"right": "오른쪽",
"Run": "가동",
"Sample extension. Allows you to use __name__ syntax in your prompt to get a random line from a file named name.txt in the wildcards directory. Also see Dynamic Prompts for similar functionality.": "샘플 확장기능입니다. __이름__형식의 문법을 사용해 와일드카드 경로 내의 이름.txt파일로부터 무작위 프롬프트를 적용할 수 있게 해줍니다. 유사한 확장기능으로 다이나믹 프롬프트가 있습니다.",
"Sampler": "샘플러",
"Sampler parameters": "샘플러 설정값",
"Sampling method": "샘플링 방법",
@ -388,6 +442,7 @@
"Select activation function of hypernetwork": "하이퍼네트워크 활성화 함수 선택",
"Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "레이어 가중치 초기화 방식 선택 - relu류 : Kaiming 추천, sigmoid류 : Xavier 추천",
"Select which Real-ESRGAN models to show in the web UI. (Requires restart)": "WebUI에 표시할 Real-ESRGAN 모델을 선택하십시오. (재시작 필요)",
"Send seed when sending prompt or image to other interface": "다른 화면으로 프롬프트나 이미지를 보낼 때 시드도 함께 보내기",
"Send to extras": "부가기능으로 전송",
"Send to img2img": "이미지→이미지로 전송",
"Send to inpaint": "인페인트로 전송",
@ -407,6 +462,7 @@
"Show progressbar": "프로그레스 바 보이기",
"Show result images": "이미지 결과 보이기",
"Show Textbox": "텍스트박스 보이기",
"Shows a gallery of generated pictures by artists separated into categories.": "생성된 이미지들을 작가별로 분류해 보여줍니다. 원본 - https://artiststostudy.pages.dev",
"Sigma adjustment for finding noise for image": "이미지 노이즈를 찾기 위해 시그마 조정",
"Sigma Churn": "시그마 섞기",
"sigma churn": "시그마 섞기",
@ -419,6 +475,7 @@
"Skip": "건너뛰기",
"Slerp angle": "구면 선형 보간 각도",
"Slerp interpolation": "구면 선형 보간",
"sort by": "정렬 기준",
"Source": "원본",
"Source directory": "원본 경로",
"Split image overlap ratio": "이미지 분할 겹침 비율",
@ -426,6 +483,7 @@
"Split oversized images": "사이즈가 큰 이미지 분할하기",
"Stable Diffusion": "Stable Diffusion",
"Stable Diffusion checkpoint": "Stable Diffusion 체크포인트",
"step cnt": "스텝 변화 횟수",
"step count": "스텝 변화 횟수",
"step1 min/max": "스텝1 최소/최대",
"step2 min/max": "스텝2 최소/최대",
@ -442,6 +500,7 @@
"System": "시스템",
"Tertiary model (C)": "3차 모델 (C)",
"Textbox": "텍스트박스",
"The official port of Deforum, an extensive script for 2D and 3D animations, supporting keyframable sequences, dynamic math parameters (even inside the prompts), dynamic masking, depth estimation and warping.": "Deforum의 공식 포팅 버전입니다. 2D와 3D 애니메이션, 키프레임 시퀀스, 수학적 매개변수, 다이나믹 마스킹 등을 지원합니다.",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "이 정규표현식은 파일명으로부터 단어를 추출하는 데 사용됩니다. 추출된 단어들은 하단의 설정을 이용해 라벨 텍스트로 변환되어 훈련에 사용됩니다. 파일명 텍스트를 유지하려면 비워두십시오.",
"This string will be used to join split words into a single line if the option above is enabled.": "이 문자열은 상단 설정이 활성화되어있을 때 분리된 단어들을 한 줄로 합치는 데 사용됩니다.",
"This text is used to rotate the feature space of the imgs embs": "이 텍스트는 이미지 임베딩의 특징 공간을 회전하는 데 사용됩니다.",
@ -462,8 +521,12 @@
"txt2img": "텍스트→이미지",
"txt2img history": "텍스트→이미지 기록",
"uniform": "uniform",
"unknown": "알수 없음",
"up": "위쪽",
"Update": "업데이트",
"Upload mask": "마스크 업로드하기",
"Upload prompt inputs": "입력할 프롬프트를 업로드하십시오",
"Upscale Before Restoring Faces": "얼굴 보정을 진행하기 전에 업스케일링 먼저 진행하기",
"Upscale latent space image when doing hires. fix": "고해상도 보정 사용시 잠재 공간 이미지 업스케일하기",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "마스크된 부분을 설정된 해상도로 업스케일하고, 인페인팅을 진행한 뒤, 다시 다운스케일 후 원본 이미지에 붙여넣습니다.",
"Upscaler": "업스케일러",
@ -472,9 +535,12 @@
"Upscaler 2 visibility": "업스케일러 2 가시성",
"Upscaler for img2img": "이미지→이미지 업스케일러",
"Upscaling": "업스케일링",
"URL for extension's git repository": "확장기능의 git 레포 URL",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "저해상도 이미지를 1차적으로 생성 후 업스케일을 진행하여, 이미지의 전체적인 구성을 바꾸지 않고 세부적인 디테일을 향상시킵니다.",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "저장 경로를 비워두면 기본 저장 폴더에 이미지들이 저장됩니다.",
"Use BLIP for caption": "캡션에 BLIP 사용",
"Use checkbox to enable the extension; it will be enabled or disabled when you click apply button": "체크박스를 이용해 적용할 확장기능을 선택하세요. 변경사항은 적용 후 UI 재시작 버튼을 눌러야 적용됩니다.",
"Use checkbox to mark the extension for update; it will be updated when you click apply button": "체크박스를 이용해 업데이트할 확장기능을 선택하세요. 업데이트는 적용 후 UI 재시작 버튼을 눌러야 적용됩니다.",
"Use deepbooru for caption": "캡션에 deepbooru 사용",
"Use dropout": "드롭아웃 사용",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "다음 태그들을 사용해 이미지 파일명 형식을 결정하세요 : [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]. 비워두면 기본값으로 설정됩니다.",

485
localizations/pt_BR.json Normal file
View File

@ -0,0 +1,485 @@
{
"⤡": "⤡",
"⊞": "⊞",
"×": "×",
"": "",
"": "",
"Loading...": "Carregando...",
"view": "ver",
"api": "api",
"•": "•",
"built with gradio": "criado com gradio",
"Stable Diffusion checkpoint": "Stable Diffusion checkpoint",
"txt2img": "txt2img",
"img2img": "img2img",
"Extras": "Extras",
"PNG Info": "Informações de PNG",
"Checkpoint Merger": "Fusão de Checkpoint",
"Train": "Treinar",
"Settings": "Configurações",
"Extensions": "Extensions",
"Prompt": "Prompt",
"Negative prompt": "Prompt negativo",
"Run": "Executar",
"Skip": "Pular",
"Interrupt": "Interromper",
"Generate": "Gerar",
"Style 1": "Estilo 1",
"Style 2": "Estilo 2",
"Label": "Rótulo",
"File": "Arquivo",
"Drop File Here": "Solte Aqui o Arquivo",
"-": "-",
"or": "ou",
"Click to Upload": "Clique para Carregar um Arquivo",
"Image": "Imagem",
"Check progress": "Checar progresso",
"Check progress (first)": "Checar progresso (primeiro)",
"Sampling Steps": "Passos de Amostragem",
"Sampling method": "Método de amostragem",
"Euler a": "Euler a",
"Euler": "Euler",
"LMS": "LMS",
"Heun": "Heun",
"DPM2": "DPM2",
"DPM2 a": "DPM2 a",
"DPM fast": "DPM fast",
"DPM adaptive": "DPM adaptive",
"LMS Karras": "LMS Karras",
"DPM2 Karras": "DPM2 Karras",
"DPM2 a Karras": "DPM2 a Karras",
"DDIM": "DDIM",
"PLMS": "PLMS",
"Width": "Largura",
"Height": "Altura",
"Restore faces": "Restaurar rostos",
"Tiling": "Ladrilhar",
"Highres. fix": "Ajuste de alta resolução",
"Firstpass width": "Primeira Passagem da largura",
"Firstpass height": "Primeira Passagem da altura",
"Denoising strength": "Denoising strength",
"Batch count": "Quantidade por lote",
"Batch size": "Quantidade de lotes",
"CFG Scale": "Escala CFG",
"Seed": "Seed",
"Extra": "Extra",
"Variation seed": "Variação de seed",
"Variation strength": "Força da variação",
"Resize seed from width": "Redimensionar a seed a partir da largura",
"Resize seed from height": "Redimensionar a seed a partir da altura",
"Script": "Script",
"None": "Nenhum",
"Prompt matrix": "Matriz de prompt",
"Prompts from file or textbox": "Prompts a partir de arquivo ou caixa de texto",
"X/Y plot": "X/Y plot",
"Put variable parts at start of prompt": "Coloca partes variáveis no começo do prompt",
"Iterate seed every line": "Iterar seed a cada linha",
"List of prompt inputs": "Lista de entrada de texto para prompt",
"Upload prompt inputs": "Carregar entrada de texto para prompt",
"X type": "Tipo do X",
"Nothing": "Nenhum",
"Var. seed": "Var. seed",
"Var. strength": "Var. da força",
"Steps": "Passos",
"Prompt S/R": "Prompt S/R",
"Prompt order": "Ordem de Prompt",
"Sampler": "Sampler",
"Checkpoint name": "Nome do Checkpoint",
"Hypernetwork": "Hypernetwork",
"Hypernet str.": "Força da Hypernet",
"Sigma Churn": "Sigma Churn",
"Sigma min": "Sigma min",
"Sigma max": "Sigma max",
"Sigma noise": "Sigma noise",
"Eta": "Tempo estimado",
"Clip skip": "Pular Clip",
"Denoising": "Denoising",
"Cond. Image Mask Weight": "Peso da Máscara Condicional de Imagem",
"X values": "Valores de X",
"Y type": "Tipo de Y",
"Y values": "Valores de Y",
"Draw legend": "Desenhar a legenda",
"Include Separate Images": "Incluir Imagens Separadas",
"Keep -1 for seeds": "Manter em -1 para seeds",
"Save": "Salvar",
"Send to img2img": "Mandar para img2img",
"Send to inpaint": "Mandar para inpaint",
"Send to extras": "Mandar para extras",
"Make Zip when Save?": "Criar um Zip quando salvar?",
"Textbox": "Caixa de texto",
"Interrogate\nCLIP": "Interrogatório\nCLIP",
"Inpaint": "Inpaint",
"Batch img2img": "Lote img2img",
"Image for img2img": "Imagem para img2img",
"Drop Image Here": "Solte a imagem aqui",
"Image for inpainting with mask": "Imagem para inpainting com máscara",
"Mask": "Máscara",
"Mask blur": "Desfoque da máscara",
"Mask mode": "Modo de máscara",
"Draw mask": "Desenhar máscara",
"Upload mask": "Carregar máscara",
"Masking mode": "Modo de máscara",
"Inpaint masked": "Inpaint o que está dentro da máscara",
"Inpaint not masked": "Inpaint o que está fora da máscara",
"Masked content": "Conteúdo mascarado",
"fill": "preencher",
"original": "original",
"latent noise": "latent noise",
"latent nothing": "latent nothing",
"Inpaint at full resolution": "Inpaint em resolução total",
"Inpaint at full resolution padding, pixels": "Inpaint de preenchimento em resolução total, pixels",
"Process images in a directory on the same machine where the server is running.": "Processar imagens no diretório da mesma maquina onde o servidor está rodando.",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "Usar um diretório vazio para salvar imagens, ao invés de salvá-las no diretório output.",
"Input directory": "Diretório de entrada",
"Output directory": "Diretório de saída",
"Resize mode": "Modo de redimensionamento",
"Just resize": "Apenas redimensionar",
"Crop and resize": "Cortar e redimensionar",
"Resize and fill": "Redimensionar e preencher",
"img2img alternative test": "Teste alternativo de img2img",
"Loopback": "Loopback",
"Outpainting mk2": "Outpainting mk2",
"Poor man's outpainting": "Poor man`s outpainting",
"SD upscale": "Ampliamento SD",
"should be 2 or lower.": "deve ser 2 ou menos.",
"Override `Sampling method` to Euler?(this method is built for it)": "Substituir `Método de amostragem` por Euler? (este método foi feito para isso)",
"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "Substituir `prompt` para o mesmo valor que o `prompt original`? (também para o `prompt negativo`)",
"Original prompt": "Prompt original",
"Original negative prompt": "Prompt negativo original",
"Override `Sampling Steps` to the same value as `Decode steps`?": "Substituir `Passos de Amostragem` para o mesmo valor que `Decodificar Passos`?",
"Decode steps": "Decode steps",
"Override `Denoising strength` to 1?": "Substituir `Quantidade do Denoise` para 1?",
"Decode CFG scale": "Decodificar escala CFG",
"Randomness": "Aleatoriedade",
"Sigma adjustment for finding noise for image": "Ajuste Sigma para encontrar ruído para imagem",
"Loops": "Loops",
"Denoising strength change factor": "Fator de mudança na quantidade do Denoise",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "Configurações recomendadas: Passos de amostragem: 80-100: Euler a, força do Denoise: 0.8",
"Pixels to expand": "Pixels para expandir",
"Outpainting direction": "Direção do outpainting",
"left": "esquerda",
"right": "direita",
"up": "cima",
"down": "baixo",
"Fall-off exponent (lower=higher detail)": "Expoente de queda (menor=mais detalhes)",
"Color variation": "Variação de cor",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "Amplia a imagem em dobro; ajusta a largura e altura para definir o tamanho do ladrilho",
"Tile overlap": "Sobreposição de ladrilho",
"Upscaler": "Ampliador",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"ESRGAN_4x": "ESRGAN_4x",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
"SwinIR 4x": "SwinIR 4x",
"Single Image": "Uma imagem",
"Batch Process": "Processo em lote",
"Batch from Directory": "Lote apartir de diretório",
"Source": "Origem",
"Show result images": "Mostrar imagens resultantes",
"Scale by": "Aumentar proporcionalmente em",
"Scale to": "Aumentar proporcionalmente para",
"Resize": "Redimensionar",
"Crop to fit": "Cortar para caber",
"Upscaler 2 visibility": "Visibilidade da ferramenta de ampliação 2",
"GFPGAN visibility": "Visibilidade GFPGAN",
"CodeFormer visibility": "Visibilidade CodeFormer",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "Peso do CodeFormer (0 = efeito máximo, 1 = efeito mínimo)",
"Upscale Before Restoring Faces": "Ampliar Antes de Refinar Rostos",
"Send to txt2img": "Mandar para txt2img",
"A merger of the two checkpoints will be generated in your": "Uma fusão dos dois checkpoints será gerada em seu",
"checkpoint": "checkpoint",
"directory.": "diretório.",
"Primary model (A)": "Modelo primário (A)",
"Secondary model (B)": "Modelo secundário (B)",
"Tertiary model (C)": "Modelo terciário (C)",
"Custom Name (Optional)": "Nome personalizado (Opcional)",
"Multiplier (M) - set to 0 to get model A": "Multiplicador (M) - definir em 0 para obter o modelo A",
"Interpolation Method": "Método de Interpolação",
"Weighted sum": "Soma de pesos",
"Add difference": "Acrescentar diferença",
"Save as float16": "Salvar como float16",
"See": "Ver",
"wiki": "wiki",
"for detailed explanation.": "para explicação detalhada.",
"Create embedding": "Criar incorporação",
"Create hypernetwork": "Criar hypernetwork",
"Preprocess images": "Pré-processar imagens",
"Name": "Nome",
"Initialization text": "Texto de inicialização",
"Number of vectors per token": "Número de vetores por token",
"Overwrite Old Embedding": "Substituir Incorporação anterior",
"Modules": "Módulos",
"Enter hypernetwork layer structure": "Entrar na estrutura de camadas da hypernetwork",
"Select activation function of hypernetwork": "Selecionar a função de ativação de hypernetwork",
"linear": "linear",
"relu": "relu",
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
"tanh": "tanh",
"sigmoid": "sigmoid",
"celu": "celu",
"gelu": "gelu",
"glu": "glu",
"hardshrink": "hardshrink",
"hardsigmoid": "hardsigmoid",
"hardtanh": "hardtanh",
"logsigmoid": "logsigmoid",
"logsoftmax": "logsoftmax",
"mish": "mish",
"prelu": "prelu",
"rrelu": "rrelu",
"relu6": "relu6",
"selu": "selu",
"silu": "silu",
"softmax": "softmax",
"softmax2d": "softmax2d",
"softmin": "softmin",
"softplus": "softplus",
"softshrink": "softshrink",
"softsign": "softsign",
"tanhshrink": "tanhshrink",
"threshold": "threshold",
"Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "Selecionar a inicialização de pesos de camada. relu-like - Kaiming, sigmoid-like - Xavier é recomendado",
"Normal": "Normal",
"KaimingUniform": "KaimingUniform",
"KaimingNormal": "KaimingNormal",
"XavierUniform": "XavierUniform",
"XavierNormal": "XavierNormal",
"Add layer normalization": "Adicionar normalização de camada",
"Use dropout": "Usar dropout",
"Overwrite Old Hypernetwork": "Sobrescrever Hypernetwork Anterior",
"Source directory": "Diretório de origem",
"Destination directory": "Diretório de destino",
"Existing Caption txt Action": "Ação de Título txt Já Existente",
"ignore": "ignorar",
"copy": "copiar",
"prepend": "adicionar ao início",
"append": "adicionar ao final",
"Create flipped copies": "Criar cópias espelhadas",
"Split oversized images into two": "Dividir imagens maiores em duas",
"Auto focal point crop": "Ajuste de corte em ponto focal automático",
"Use BLIP for caption": "Usar BLIP para o título",
"Use deepbooru for caption": "Usar deepbooru para o título",
"Split image threshold": "Limite de divisão de imagem",
"Split image overlap ratio": "Proporção de sobreposição da divisão de imagem",
"Focal point face weight": "Peso de ponto focal para rosto",
"Focal point entropy weight": "Peso de ponto focal para entropia",
"Focal point edges weight": "Peso de ponto focal para bordas",
"Create debug image": "Criar imagem de depuração",
"Preprocess": "Pré-processar",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "Treinar uma incorporação; precisa especificar um diretório com imagens de proporção 1:1",
"[wiki]": "[wiki]",
"Embedding": "Incorporação",
"Embedding Learning rate": "Taxa de aprendizagem da incorporação",
"Hypernetwork Learning rate": "Taxa de aprendizagem de Hypernetwork",
"Dataset directory": "Diretório de Dataset",
"Log directory": "Diretório de Log",
"Prompt template file": "Arquivo padrão de Prompt",
"Max steps": "Passos máximos",
"Save an image to log directory every N steps, 0 to disable": "Salvar uma imagem no diretório de log a cada N passos. 0 para desativar",
"Save a copy of embedding to log directory every N steps, 0 to disable": "Salva uma cópia da incorporação no diretório de log a cada N passos. 0 para desativar",
"Save images with embedding in PNG chunks": "Salva imagens com incorporação em segmentos de PNG",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "Ler parâmetros (prompt, etc...) para a aba txt2img durante os previews",
"Train Hypernetwork": "Treinar Hypernetwork",
"Train Embedding": "Treinar Incorporação",
"Apply settings": "Aplicar configurações",
"Saving images/grids": "Salvar imagens/grades",
"Always save all generated images": "Sempre salvar todas as imagens geradas",
"File format for images": "Tipo de formato das imagens salvas",
"Images filename pattern": "Padrão de nomeação para imagens salvas",
"Add number to filename when saving": "Adicionar número para o nome do arquivo quando salvar",
"Always save all generated image grids": "Sempre salvar todas as grades de imagens",
"File format for grids": "Tipo de formato das grades de imagens salvas",
"Add extended info (seed, prompt) to filename when saving grid": "Adicionar informações extras (seed, prompt) para os arquivos quando gerar uma grade",
"Do not save grids consisting of one picture": "Não salvar grades de apenas uma imagem",
"Prevent empty spots in grid (when set to autodetect)": "Previnir espaços vazios na grade (quando marcado para autodetectar)",
"Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "Contagem de linhas da grade; -1 para autodetectar e 0 para ser igual ao valor do tamanho das levas",
"Save text information about generation parameters as chunks to png files": "Salvar informações de parâmetros de geração como segmentos png",
"Create a text file next to every image with generation parameters.": "Criar um arquivo de texto com informações de geração junto a cada imagem gerada.",
"Save a copy of image before doing face restoration.": "Salva uma cópia de cada imagem antes do refinamento facial.",
"Quality for saved jpeg images": "Qualidade das imagens jpeg",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "Se a imagem PNG for maior que 4MB ou qualquer dimensão maior que 4000, diminuir e salvar uma cópia em JPG",
"Use original name for output filename during batch process in extras tab": "Usar o nome original para os arquivos de output durante o processo de levas da aba Extras",
"When using 'Save' button, only save a single selected image": "Quando usar o botão `Salvar`, somente salvar as imagens selecionadas.",
"Do not add watermark to images": "Não adicionar marca dágua nas imagens",
"Paths for saving": "Caminhos para salvar",
"Output directory for images; if empty, defaults to three directories below": "Diretório de saída para imagens; se deixado em branco, as imagens vao para os seguintes diretórios",
"Output directory for txt2img images": "Diretório de Saída para imagens txt2img",
"Output directory for img2img images": "Diretório de Saída para imagens img2img",
"Output directory for images from extras tab": "Diretório de Saída para a aba Extras",
"Output directory for grids; if empty, defaults to two directories below": "Diretório de Saída para grades; se vazio, vão para os diretórios seguintes",
"Output directory for txt2img grids": "Diretório de Saída para grades de imagens txt2img",
"Output directory for img2img grids": "Diretório de Saída para grades de imagens img2img",
"Directory for saving images using the Save button": "Diretório para imagens salvas utilizando o botão de salvar",
"Saving to a directory": "Salvando para um diretório",
"Save images to a subdirectory": "Salvar imagens para um subdiretório",
"Save grids to a subdirectory": "Salvar grades de imagens para um subdiretório",
"When using \"Save\" button, save images to a subdirectory": "Quando usar o botão \"Salvar\", salvar imagens para um subdiretório",
"Directory name pattern": "Padrão de nome de diretório",
"Max prompt words for [prompt_words] pattern": "Número máximo de palavras do padrão de prompt [prompt_words]",
"Upscaling": "Ampliando",
"Tile size for ESRGAN upscalers. 0 = no tiling.": "Tamanho do ladrilho para ampliação ESRGAN. 0 = sem ladrilho.",
"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "Sobreposição de azulejo, em pixels, para amplicação ESRGAN. Valores baixos = linhas de fusão mais aparente.",
"Tile size for all SwinIR.": "Tamanho do ladrilho para todo SwinIR.",
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "Sobreposição de azulejo, em pixels, para SwinIR. Valores baixos = junção mais aparente.",
"LDSR processing steps. Lower = faster": "Steps de processamento LDSR. Menos = rápido",
"Upscaler for img2img": "Ampliação para img2img",
"Upscale latent space image when doing hires. fix": "Ampliar a imagem do espaço latente quando usando o ajuste de alta definição - hires. fix",
"Face restoration": "Refinamento de rosto",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "Parâmento de peso do CodeFormer; 0 = efeito máximo; 1 = efeito mínimo",
"Move face restoration model from VRAM into RAM after processing": "Mover o processo de refinamento de rosto da VRAM da placa de vídeo para a RAM do computador depois do processamento.",
"System": "Sistema",
"VRAM usage polls per second during generation. Set to 0 to disable.": "Levantamento de uso de VRAM por segundo durante gerações. Deixar em 0 para desativar.",
"Always print all generation info to standard output": "Sempre mostrar as informações de todas as gerações no padrão de output",
"Add a second progress bar to the console that shows progress for an entire job.": "Adicionar uma segunda barra de processamento no console que mostra a progressão de todo o trabalho.",
"Training": "Treinamento",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "Mover VAE e CLIP para a RAM quando treinando hypernetwork. Preserva VRAM.",
"Filename word regex": "Palavra de nome de arquivo regex",
"Filename join string": "Nome de arquivo join string",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "Número de repetições para entrada única de imagens por época; serve apenas para mostrar o número de época",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "Salvar um csv com as perdas para o diretório de log a cada N steps, 0 para desativar",
"Use cross attention optimizations while training": "Usar otimizações de atenção cruzada enquanto treinando",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "Checkpoints para manter no cache da RAM",
"Hypernetwork strength": "Força da Hypernetwork",
"Inpainting conditioning mask strength": "Força do inpaint para máscaras condicioniais",
"Apply color correction to img2img results to match original colors.": "Aplicar correção de cor nas imagens geradas em img2img, usando a imagem original como base.",
"Save a copy of image before applying color correction to img2img results": "Salvar uma cópia das imagens geradas em img2img antes de aplicar a correção de cor",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "Durante gerações img2img, fazer examente o número de steps definidos na barra (normalmente você faz menos steps com denoising menor).",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "Ativar quantização em K samples para resultados mais nítidos e visíveis. Pode alterar seeds ja existentes. Precisa reiniciar para funcionar.",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "Ênfase: usar parênteses ao redor de palavras (texto de exemplo) para fazer o modelo dar mais atenção para aquela palavra ou frase, e chaves [texto de exemplo] para tirar atenção",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "Usar método anterior de implementação de ênfase. Útil para reproduzir seeds antigas.",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "Faz as amostragens K-diffusion produzirem imagens iguais em lotes quando criando uma única imagem",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "Aumenta a coerência por preenchimento apartir da ultima vírgula dentro de n tokens quando usando mais de 75 tokens",
"Filter NSFW content": "Filtra conteúdos inadequados(geralmente +18)",
"Stop At last layers of CLIP model": "Para na última camada do modelo CLIP",
"Interrogate Options": "Opções de Interrogatório",
"Interrogate: keep models in VRAM": "Interrogar: manter modelos na VRAM",
"Interrogate: use artists from artists.csv": "Interrogar: usa artistas e estilos do documento artists.csv",
"Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "Interrogar: incluir classificação de tags de modelo combinando nos resultados (Não tem efeito na interrogação feita por legenda).",
"Interrogate: num_beams for BLIP": "Interrogar: num_beams para BLIP",
"Interrogate: minimum description length (excluding artists, etc..)": "Interrogar: tamanho mínimo da descrição (tirando artistas, etc..)",
"Interrogate: maximum description length": "Interrogar: tamanho máximo da descrição",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP: número máximo de linhas no arquivo de texto(0 = Sem limites)",
"Interrogate: deepbooru score threshold": "Interrogatório: limite de score deepbooru",
"Interrogate: deepbooru sort alphabetically": "Interrogar: organizar deepbooru por ordem alfabética",
"use spaces for tags in deepbooru": "usar espaços para tags em deepbooru",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "espaço (\\) colchetes em deepbooru (são usados como colchetes ao invés de dar ênfase)",
"User interface": "Interface de usuário",
"Show progressbar": "Mostrar barra de progresso",
"Show image creation progress every N sampling steps. Set 0 to disable.": "Mostrar a criação de imagens a cada N sampling steps. Em 1 já dá para ver o processo de geração. Marcar como 0 para desativar.",
"Show previews of all images generated in a batch as a grid": "Mostrar previsualização de todas as imagens geradas em leva numa grade",
"Show grid in results for web": "Mostrar grade em resultados para web",
"Do not show any images in results for web": "Não mostrar nenhuma imagem em resultados para web",
"Add model hash to generation information": "Adicionar hash do modelo para informação de geração",
"Add model name to generation information": "Adicionar nome do modelo para informação de geração",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "Quando ler parâmetros de texto para a interface (de informações de PNG ou texto copiado), não alterar o modelo/intervalo selecionado.",
"Send seed when sending prompt or image to other interface": "Enviar seed quando enviar prompt ou imagem para outra interface",
"Font for image grids that have text": "Fonte para grade de imagens que têm texto",
"Enable full page image viewer": "Ativar visualizador de página inteira",
"Show images zoomed in by default in full page image viewer": "Mostrar imagens com zoom por definição no visualizador de página inteira",
"Show generation progress in window title.": "Mostrar barra de progresso no nome da janela.",
"Quicksettings list": "Lista de configurações rapidas",
"Localization (requires restart)": "Localização (precisa reiniciar)",
"ar_AR": "ar_AR",
"de_DE": "de_DE",
"es_ES": "es_ES",
"fr_FR": "fr_FR",
"it_IT": "it_IT",
"ja_JP": "ja_JP",
"ko_KR": "ko_KR",
"pt_BR": "pt_BR",
"ru_RU": "ru_RU",
"tr_TR": "tr_TR",
"zh_CN": "zh_CN",
"zh_TW": "zh_TW",
"Sampler parameters": "Parâmetros de Amostragem",
"Hide samplers in user interface (requires restart)": "Esconder amostragens na interface de usuário (precisa reiniciar)",
"eta (noise multiplier) for DDIM": "tempo estimado (multiplicador de ruído) para DDIM",
"eta (noise multiplier) for ancestral samplers": "tempo estimado (multiplicador de ruído) para amostragens ancestrais",
"img2img DDIM discretize": "Discretização de img2img DDIM",
"uniform": "uniforme",
"quad": "quad",
"sigma churn": "sigma churn",
"sigma tmin": "sigma tmin",
"sigma noise": "sigma noise",
"Eta noise seed delta": "tempo estimado para ruído seed delta",
"Request browser notifications": "Solicitar notificações do navegador",
"Download localization template": "Baixar arquivo modelo de localização",
"Reload custom script bodies (No ui updates, No restart)": "Recarregar scripts personalizados (Sem atualizar a interface, Sem reiniciar)",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "Reiniciar Gradio e atualizar componentes (Scripts personalizados, ui.py, js e css)",
"Installed": "Instalado",
"Available": "Disponível",
"Install from URL": "Instalado de URL",
"Apply and restart UI": "Apicar e reiniciar a interface",
"Check for updates": "Procurar por atualizações",
"Extension": "Extensão",
"URL": "URL",
"Update": "Atualização",
"Load from:": "Carregar de:",
"Extension index URL": "Índice de extensão URL",
"URL for extension's git repository": "URL para repositório git da extensão",
"Local directory name": "Nome do diretório local",
"Install": "Instalar",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt (apertar Ctrl+Enter ou Alt+Enter para gerar)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "Prompt Negativo (apertar Ctrl+Enter ou Alt+Enter para gerar)",
"Add a random artist to the prompt.": "Adicionar um artista aleatório para o prompt.",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "Lê os parâmetros de geração do prompt ou da última geraçao, caso o prompt esteja vazio.",
"Save style": "Salva um estilo de prompt.",
"Apply selected styles to current prompt": "Aplica o estilo para o prompt atual.",
"Stop processing current image and continue processing.": "Pula a imagem sendo gerada e vai para a próxima.",
"Stop processing images and return any results accumulated so far.": "Interrompe o processo e mostra o que foi gerado até então.",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "Estilo para aplicar; também serve para o prompt negativo e vai preencher se usado.",
"Do not do anything special": "Não faça nada de especial",
"Which algorithm to use to produce the image": "O tipo de algoritmo para gerar imagens.",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - cria mais variações para as imagens em diferentes passos. Mais que 40 passos cancela o efeito.",
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models - Funciona melhor para inpainting.",
"Produce an image that can be tiled.": "Produz uma imagem que pode ser ladrilhada.",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "Cria um processo em duas etapas, com uma imagem em baixa qualidade primeiro, aumenta a imagem e refina os detalhes sem alterar a composição da imagem",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Quanto o algoritmo deve manter da imagem original. Em 0, nada muda. Em 1 o algoritmo ignora a imagem original. Valores menores que 1.0 demoram mais.",
"How many batches of images to create": "Quantos lotes de imagens criar",
"How many image to create in a single batch": "Quantas imagens criar em um único lote",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - Quanto maior o valor, mais segue o prompt e quanto menor, menor segue.",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Codigo de geração de uma imagem - criando uma imagem com os mesmos parâmetros e seed trazem o mesmo resultado.",
"Set seed to -1, which will cause a new random number to be used every time": "Define seed como -1, deixando o valor que vai aparecer como aleatório.",
"Reuse seed from last generation, mostly useful if it was randomed": "Reutilizar a seed da última geração, útil principalmente se ela foi aleatória",
"Seed of a different picture to be mixed into the generation.": "Seed de uma imagem diferente é misturada na geração.",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Qual a variação a ser gerada. Em 0, não tem efeito. Em 1, gera uma imagem completa com a variação de seed, (exceto com amostragens a).",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Tenta gerar uma imagem similar ao que teria sido feito com a mesma seed em dimensões especifica.",
"Separate values for X axis using commas.": "Separa os valores para o eixo X usando vírgulas.",
"Separate values for Y axis using commas.": "Separa os valores para o eixo Y usando vírgulas.",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "Salva a imagem no diretório padrão ou escolhido e cria um arquivo csv com os parâmetros da geração.",
"Open images output directory": "Abre o diretório de saída de imagens.",
"How much to blur the mask before processing, in pixels.": "Transição do contorno da máscara, em pixels.",
"What to put inside the masked area before processing it with Stable Diffusion.": "O que vai dentro da máscara antes de processá-la com Stable Diffusion.",
"fill it with colors of the image": "Preenche usando as cores da imagem.",
"keep whatever was there originally": "manter usando o que estava lá originalmente",
"fill it with latent space noise": "Preenche com ruídos do espaço latente.",
"fill it with latent space zeroes": "Preenche com zeros do espaço latente.",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "Faz ampliação na região com máscara para atingir a resolução desejada, faz inpainting, faz downscale para voltar à resolução original e cola na imagem original",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "Redimensiona a imagem para a resolução desejada. A menos que a altura e a largura sejam iguais, você obterá uma proporção incorreta.",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "Redimensiona a imagem para que toda a resolução desejada seja preenchida com a imagem. Corta as partes que ficaram pra fora.",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "Redimensiona a imagem para que toda a imagem esteja dentro da resolução desejada. Preenche o espaço vazio com as cores da imagem.",
"How many times to repeat processing an image and using it as input for the next iteration": "Número de vezes que vai repetir o processamento da imagem e usar como entrada para a próxima iteração",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "No modo de loopback, em cada loop a força do denoise é multiplicado por este valor. <1 significa diminuir a variedade para que sua sequência converta em uma imagem fixa. >1 significa aumentar a variedade para que sua sequência se torne cada vez mais caótica.",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "Para ampliação SD, quantidade de sobreposição em pixels que deve haver entre os ladrilhos. Os ladrilhos se sobrepõem para que, quando forem mesclados de volta em uma imagem, não haja linhas de junção claramente visíveis.",
"A directory on the same machine where the server is running.": "Um diretório na mesma máquina onde o servidor está rodando.",
"Leave blank to save images to the default path.": "Deixar em branco para salvar imagens no caminho padrão.",
"Result = A * (1 - M) + B * M": "Resultado = A * (1 - M) + B * M",
"Result = A + (B - C) * M": "Resultado = A + (B - C) * M",
"1st and last digit must be 1. ex:'1, 2, 1'": "Primeiro e último dígito precisam ser 1. ex:`1, 2, 1`",
"Path to directory with input images": "Caminho para o diretório com imagens de entrada",
"Path to directory where to write outputs": "Caminho para o diretório para gravar as saídas",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "Usa essas tags para definir como os nomes dos arquivos sao escolhidos: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; deixe em branco para manter o padrão.",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "Se esta opção estiver marcada, as imagens não vão ter marca d`água. Aviso: se você não quer a marca d`água, você pode estar se envolvendo em comportamentos antiéticos",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.": "Usa essas tags para definir como os nomes dos subdiretorios e grades são escolhidos: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; deixe em branco para manter o padrão.",
"Restore low quality faces using GFPGAN neural network": "Restaurar rostos de baixa qualidade usando a rede neural GFPGAN",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "Esta expressão regular vai retirar palavras do nome do arquivo e serão juntadas via regex usando a opção abaixo em etiquetas usadas em treinamento. Não mexer para manter os nomes como estão.",
"This string will be used to join split words into a single line if the option above is enabled.": "Esta string será usada para unir palavras divididas em uma única linha se a opção acima estiver habilitada.",
"Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "Aplicável somente para modelos de inpaint. Determina quanto deve mascarar da imagem original para inpaint e img2img. 1.0 significa totalmente mascarado, que é o comportamento padrão. 0.0 significa uma condição totalmente não mascarada. Valores baixos ajudam a preservar a composição geral da imagem, mas vai encontrar dificuldades com grandes mudanças.",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "Lista de nomes de configurações, separados por vírgulas, para configurações que devem ir para a barra de acesso rápido na parte superior, em vez da guia de configuração usual. Veja modules/shared.py para nomes de configuração. Necessita reinicialização para aplicar.",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "Se este valor for diferente de zero, ele será adicionado à seed e usado para inicializar o RNG para ruídos ao usar amostragens com Tempo Estimado. Você pode usar isso para produzir ainda mais variações de imagens ou pode usar isso para combinar imagens de outro software se souber o que está fazendo."
"Leave empty for auto": "Deixar desmarcado para automático"
}
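Each of these localization files is a flat JSON object whose keys are the English source strings, so the keys must match the UI text exactly, typos included (e.g. "If this values is non-zero"), while only the values carry the translation. Below is a small sanity-check sketch for such a file, assuming Python 3.9+; the script and its checks are illustrative and not part of the repository.

import json
import sys

def check_localization(path: str) -> int:
    """Hypothetical checker: verify the file parses as a flat string-to-string
    JSON object and list entries whose value equals the key (likely untranslated)."""
    with open(path, encoding="utf-8") as f:
        data = json.load(f)  # any syntax error, such as a missing comma, fails here

    errors = 0
    for key, value in data.items():
        if not isinstance(value, str):
            print(f"non-string value for key {key!r}")
            errors += 1
        elif value == key and key.strip():
            print(f"possibly untranslated: {key!r}")
    return errors

if __name__ == "__main__":
    sys.exit(check_localization(sys.argv[1]))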

488
localizations/zh_TW.json Normal file
View File

@ -0,0 +1,488 @@
{
"⤡": "⤡",
"⊞": "⊞",
"×": "×",
"": "",
"": "",
"Loading...": "載入中…",
"view": "檢視",
"api": "api",
"•": "•",
"built with gradio": "基於 Gradio 構建",
"Stable Diffusion checkpoint": "Stable Diffusion 模型權重存檔點",
"txt2img": "文生圖",
"img2img": "圖生圖",
"Extras": "後處理",
"PNG Info": "PNG 資訊",
"Checkpoint Merger": "模型權重存檔點合併工具",
"Train": "訓練",
"Create aesthetic embedding": "生成美術風格 embedding",
"Image Browser": "圖庫瀏覽器",
"History": "歷史記錄",
"Settings": "設定",
"Prompt": "提示詞",
"Negative prompt": "反向提示詞",
"Run": "執行",
"Skip": "跳過",
"Interrupt": "中止",
"Generate": "生成",
"Style 1": "模版風格 1",
"Style 2": "模版風格 2",
"Label": "標籤",
"File": "檔案",
"Drop File Here": "拖曳檔案到此",
"-": "-",
"or": "或",
"Click to Upload": "點擊上傳",
"Image": "圖像",
"Check progress": "檢視進度",
"Check progress (first)": "(首次)檢視進度",
"Sampling Steps": "採樣疊代步數",
"Sampling method": "採樣方法",
"Euler a": "Euler a",
"Euler": "Euler",
"LMS": "LMS",
"Heun": "Heun",
"DPM2": "DPM2",
"DPM2 a": "DPM2 a",
"DPM fast": "DPM fast",
"DPM adaptive": "DPM adaptive",
"LMS Karras": "LMS Karras",
"DPM2 Karras": "DPM2 Karras",
"DPM2 a Karras": "DPM2 a Karras",
"DDIM": "DDIM",
"PLMS": "PLMS",
"Width": "寬度",
"Height": "高度",
"Restore faces": "面部修復",
"Tiling": "可平鋪",
"Highres. fix": "高解析度修復",
"Firstpass width": "第一遍的寬度",
"Firstpass height": "第一遍的高度",
"Denoising strength": "去噪強度",
"Batch count": "批次",
"Batch size": "批量",
"CFG Scale": "提示詞相關性CFG",
"Seed": "隨機種子",
"Extra": "額外參數",
"Variation seed": "差異隨機種子",
"Variation strength": "差異強度",
"Resize seed from width": "自寬度縮放隨機種子",
"Resize seed from height": "自高度縮放隨機種子",
"Open for Clip Aesthetic!": "打開美術風格 Clip!",
"▼": "▼",
"Aesthetic weight": "美術風格權重",
"Aesthetic steps": "美術風格疊代步數",
"Aesthetic learning rate": "美術風格學習率",
"Slerp interpolation": "Slerp 插值",
"Aesthetic imgs embedding": "美術風格圖集 embedding",
"None": "無",
"Aesthetic text for imgs": "該圖集的美術風格描述",
"Slerp angle": "Slerp 角度",
"Is negative text": "是反向提示詞",
"Script": "指令碼",
"Embedding to Shareable PNG": "將 Embedding 轉換為可分享的 PNG",
"Prompt matrix": "提示詞矩陣",
"Prompts from file or textbox": "從文字方塊或檔案載入提示詞",
"X/Y plot": "X/Y 圖表",
"Source embedding to convert": "用於轉換的源 Embedding",
"Embedding token": "Embedding 的關鍵詞",
"Put variable parts at start of prompt": "把變量部分放在提示詞文本的開頭",
"Show Textbox": "顯示文字方塊",
"File with inputs": "含輸入內容的檔案",
"Prompts": "提示詞",
"X type": "X軸類型",
"Nothing": "無",
"Var. seed": "差異隨機種子",
"Var. strength": "差異強度",
"Steps": "疊代步數",
"Prompt S/R": "提示詞替換",
"Prompt order": "提示詞順序",
"Sampler": "採樣器",
"Checkpoint name": "模型權重存檔點的名稱",
"Hypernetwork": "Hypernetwork",
"Hypernet str.": "Hypernetwork 強度",
"Sigma Churn": "Sigma Churn",
"Sigma min": "最小 Sigma",
"Sigma max": "最大 Sigma",
"Sigma noise": "Sigma noise",
"Eta": "Eta",
"Clip skip": "Clip 跳過",
"Denoising": "去噪",
"X values": "X軸數值",
"Y type": "Y軸類型",
"Y values": "Y軸數值",
"Draw legend": "在圖表中包括軸標題",
"Include Separate Images": "包括獨立的圖像",
"Keep -1 for seeds": "保持隨機種子為-1",
"Drop Image Here": "拖曳圖像到此",
"Save": "儲存",
"Send to img2img": ">> 圖生圖",
"Send to inpaint": ">> 內補繪製",
"Send to extras": ">> 後處理",
"Make Zip when Save?": "儲存時生成ZIP壓縮檔案",
"Textbox": "文字方塊",
"Interrogate\nCLIP": "CLIP\n反推提示詞",
"Interrogate\nDeepBooru": "DeepBooru\n反推提示詞",
"Inpaint": "內補繪製",
"Batch img2img": "批量圖生圖",
"Image for img2img": "圖生圖的圖像",
"Image for inpainting with mask": "用於內補繪製蒙版內容的圖像",
"Mask": "蒙版",
"Mask blur": "蒙版模糊",
"Mask mode": "蒙版模式",
"Draw mask": "繪製蒙版",
"Upload mask": "上傳蒙版",
"Masking mode": "蒙版模式",
"Inpaint masked": "內補繪製蒙版內容",
"Inpaint not masked": "內補繪製非蒙版內容",
"Masked content": "蒙版蒙住的內容",
"fill": "填充",
"original": "原圖",
"latent noise": "潛空間噪聲",
"latent nothing": "潛空間數值零",
"Inpaint at full resolution": "以完整解析度進行內補繪製",
"Inpaint at full resolution padding, pixels": "以完整解析度進行內補繪製 — 填補畫素",
"Process images in a directory on the same machine where the server is running.": "在伺服器主機上的目錄中處理圖像",
"Use an empty output directory to save pictures normally instead of writing to the output directory.": "指定一個空的資料夾為輸出目錄而非預設的 output 資料夾為輸出目錄",
"Disabled when launched with --hide-ui-dir-config.": "啟動 --hide-ui-dir-config 時禁用",
"Input directory": "輸入目錄",
"Output directory": "輸出目錄",
"Resize mode": "縮放模式",
"Just resize": "只縮放",
"Crop and resize": "縮放並剪裁",
"Resize and fill": "縮放並填充",
"img2img alternative test": "圖生圖的另一種測試",
"Loopback": "回送",
"Outpainting mk2": "外補繪製第二版",
"Poor man's outpainting": "效果稍差的外補繪製",
"SD upscale": "使用 SD 放大",
"should be 2 or lower.": "必須小於等於2",
"Override `Sampling method` to Euler?(this method is built for it)": "覆寫「採樣方法」為 Euler這個方法就是為這樣做設計的",
"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "覆寫「提示詞」為「初始提示詞」?(包括「反向提示詞」)",
"Original prompt": "初始提示詞",
"Original negative prompt": "初始反向提示詞",
"Override `Sampling Steps` to the same value as `Decode steps`?": "覆寫「採樣疊代步數」為「解碼疊代步數」?",
"Decode steps": "解碼疊代步數",
"Override `Denoising strength` to 1?": "覆寫「去噪強度」為1?",
"Decode CFG scale": "解碼提示詞相關性CFG",
"Randomness": "隨機度",
"Sigma adjustment for finding noise for image": "為尋找圖中噪點的 Sigma 調整",
"Loops": "疊代次數",
"Denoising strength change factor": "去噪強度的調整係數",
"Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "推薦設定採樣疊代步數80-100採樣器Euler a去噪強度0.8",
"Pixels to expand": "拓展的畫素數",
"Outpainting direction": "外補繪製的方向",
"left": "左",
"right": "右",
"up": "上",
"down": "下",
"Fall-off exponent (lower=higher detail)": "衰減指數(越低細節越好)",
"Color variation": "色彩變化",
"Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "將圖像放大到兩倍尺寸; 使用寬度和高度滑塊設定圖塊尺寸",
"Tile overlap": "圖塊重疊的畫素",
"Upscaler": "放大演算法",
"Lanczos": "Lanczos",
"LDSR": "LDSR",
"BSRGAN 4x": "BSRGAN 4x",
"ESRGAN_4x": "ESRGAN_4x",
"R-ESRGAN 4x+ Anime6B": "R-ESRGAN 4x+ Anime6B",
"ScuNET GAN": "ScuNET GAN",
"ScuNET PSNR": "ScuNET PSNR",
"SwinIR_4x": "SwinIR 4x",
"Single Image": "單個圖像",
"Batch Process": "批量處理",
"Batch from Directory": "從目錄進行批量處理",
"Source": "來源",
"Show result images": "顯示輸出圖像",
"Scale by": "等比縮放",
"Scale to": "指定尺寸縮放",
"Resize": "縮放",
"Crop to fit": "裁剪以適應",
"Upscaler 2": "放大演算法 2",
"Upscaler 2 visibility": "放大演算法 2 可見度",
"GFPGAN visibility": "GFPGAN 可見度",
"CodeFormer visibility": "CodeFormer 可見度",
"CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "CodeFormer 權重 0 = 最大效果, 1 = 最小效果)",
"Open output directory": "打開輸出目錄",
"Send to txt2img": ">> 文生圖",
"A merger of the two checkpoints will be generated in your": "合併後的模型權重存檔點會生成在你的",
"checkpoint": "模型權重存檔點",
"directory.": "目錄",
"Primary model (A)": "主要模型 (A)",
"Secondary model (B)": "第二模型 (B)",
"Tertiary model (C)": "第三模型 (C)",
"Custom Name (Optional)": "自訂名稱 (可選)",
"Multiplier (M) - set to 0 to get model A": "倍率 (M) - 設為 0 等價於模型 A",
"Interpolation Method": "插值方法",
"Weighted sum": "加權和",
"Add difference": "加入差分",
"Save as float16": "以 float16 儲存",
"See": "檢視",
"wiki": "wiki",
"for detailed explanation.": "以了解詳細說明",
"Create embedding": "生成 embedding",
"Create aesthetic images embedding": "生成美術風格圖集 embedding",
"Create hypernetwork": "生成 hypernetwork",
"Preprocess images": "圖像預處理",
"Name": "名稱",
"Initialization text": "初始化文字",
"Number of vectors per token": "每個 token 的向量數",
"Overwrite Old Embedding": "覆寫舊的 Embedding",
"Modules": "模組",
"Enter hypernetwork layer structure": "輸入 hypernetwork 層結構",
"Select activation function of hypernetwork": "選擇 hypernetwork 的激活函數",
"linear": "linear",
"relu": "relu",
"leakyrelu": "leakyrelu",
"elu": "elu",
"swish": "swish",
"Add layer normalization": "加入層標準化",
"Use dropout": "採用 dropout 防止過擬合",
"Overwrite Old Hypernetwork": "覆寫舊的 Hypernetwork",
"Source directory": "來源目錄",
"Destination directory": "目標目錄",
"Existing Caption txt Action": "對已有的TXT說明文字的行為",
"ignore": "無視",
"copy": "複製",
"prepend": "放前面",
"append": "放後面",
"Create flipped copies": "生成鏡像副本",
"Split oversized images into two": "將過大的圖像分為兩份",
"Split oversized images": "分割過大的圖像",
"Use BLIP for caption": "使用 BLIP 生成說明文字(自然語言描述)",
"Use deepbooru for caption": "使用 deepbooru 生成說明文字(標籤)",
"Split image threshold": "圖像分割閾值",
"Split image overlap ratio": "分割圖像重疊的比率",
"Preprocess": "預處理",
"Train an embedding; must specify a directory with a set of 1:1 ratio images": "訓練 embedding 必須指定一組具有 1:1 比例圖像的目錄",
"Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "訓練 embedding 或者 hypernetwork 必須指定一組具有 1:1 比例圖像的目錄",
"[wiki]": "[wiki]",
"Embedding": "Embedding",
"Embedding Learning rate": "Embedding 學習率",
"Hypernetwork Learning rate": "Hypernetwork 學習率",
"Learning rate": "學習率",
"Dataset directory": "資料集目錄",
"Log directory": "日誌目錄",
"Prompt template file": "提示詞模版檔案",
"Max steps": "最大疊代步數",
"Save an image to log directory every N steps, 0 to disable": "每 N 步儲存一個圖像到日誌目錄0 表示禁用",
"Save a copy of embedding to log directory every N steps, 0 to disable": "每 N 步將 embedding 的副本儲存到日誌目錄0 表示禁用",
"Save images with embedding in PNG chunks": "儲存圖像並在 PNG 檔案中嵌入 embedding 檔案",
"Read parameters (prompt, etc...) from txt2img tab when making previews": "進行預覽時從文生圖頁籤中讀取參數(提示詞等)",
"Train Hypernetwork": "訓練 Hypernetwork",
"Train Embedding": "訓練 Embedding",
"Create an aesthetic embedding out of any number of images": "從任意數量的圖像中建立美術風格 embedding",
"Create images embedding": "生成圖集 embedding",
"txt2img history": "文生圖歷史記錄",
"img2img history": "圖生圖歷史記錄",
"extras history": "後處理歷史記錄",
"Renew Page": "刷新頁面",
"extras": "後處理",
"favorites": "收藏夾",
"custom fold": "自訂資料夾",
"Load": "載入",
"Images directory": "圖像目錄",
"Prev batch": "上一批",
"Next batch": "下一批",
"First Page": "首頁",
"Prev Page": "上一頁",
"Page Index": "頁數",
"Next Page": "下一頁",
"End Page": "尾頁",
"number of images to delete consecutively next": "接下來要連續刪除的圖像數",
"Delete": "刪除",
"Generate Info": "生成資訊",
"File Name": "檔案名",
"Collect": "收藏",
"Refresh page": "刷新頁面",
"Date to": "日期至",
"Number": "數量",
"set_index": "設定索引",
"Checkbox": "核取方塊",
"Apply settings": "儲存設定",
"Saving images/grids": "儲存圖像/概覽圖",
"Always save all generated images": "始終儲存所有生成的圖像",
"File format for images": "圖像的檔案格式",
"Images filename pattern": "圖像檔案名格式",
"Always save all generated image grids": "始終儲存所有生成的概覽圖",
"File format for grids": "概覽圖的檔案格式",
"Add extended info (seed, prompt) to filename when saving grid": "儲存概覽時將擴展資訊(隨機種子,提示詞)加入到檔案名",
"Do not save grids consisting of one picture": "只有一張圖片時不要儲存概覽圖",
"Prevent empty spots in grid (when set to autodetect)": "(在自動檢測時)防止概覽圖中出現空位",
"Grid row count; use -1 for autodetect and 0 for it to be same as batch size": "概覽行數; 使用 -1 進行自動檢測,使用 0 使其與批量大小相同",
"Save text information about generation parameters as chunks to png files": "將有關生成參數的文本資訊作為塊儲存到PNG檔案中",
"Create a text file next to every image with generation parameters.": "儲存圖像時在每個圖像旁邊建立一個文本檔案儲存生成參數",
"Save a copy of image before doing face restoration.": "在進行面部修復之前儲存圖像副本",
"Quality for saved jpeg images": "儲存的JPEG圖像的品質",
"If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG": "如果 PNG 圖像大於 4MB 或寬高大於 4000則縮小並儲存副本為 JPG",
"Use original name for output filename during batch process in extras tab": "在後處理頁籤中的批量處理過程中使用原始名稱作為輸出檔案名",
"When using 'Save' button, only save a single selected image": "使用「儲存」按鈕時,只儲存一個選定的圖像",
"Do not add watermark to images": "不要給圖像加浮水印",
"Paths for saving": "儲存路徑",
"Output directory for images; if empty, defaults to three directories below": "圖像的輸出目錄; 如果為空,則預設為以下三個目錄",
"Output directory for txt2img images": "文生圖的輸出目錄",
"Output directory for img2img images": "圖生圖的輸出目錄",
"Output directory for images from extras tab": "後處理的輸出目錄",
"Output directory for grids; if empty, defaults to two directories below": "概覽圖的輸出目錄; 如果為空,則預設為以下兩個目錄",
"Output directory for txt2img grids": "文生圖概覽的輸出目錄",
"Output directory for img2img grids": "圖生圖概覽的輸出目錄",
"Directory for saving images using the Save button": "使用「儲存」按鈕儲存圖像的目錄",
"Saving to a directory": "儲存到目錄",
"Save images to a subdirectory": "將圖像儲存到子目錄",
"Save grids to a subdirectory": "將概覽圖儲存到子目錄",
"When using \"Save\" button, save images to a subdirectory": "使用「儲存」按鈕時,將圖像儲存到子目錄",
"Directory name pattern": "目錄名稱格式",
"Max prompt words for [prompt_words] pattern": "[prompt_words] 格式的最大提示詞數量",
"Upscaling": "放大",
"Tile size for ESRGAN upscalers. 0 = no tiling.": "ESRGAN 的圖塊尺寸。0 = 不分塊",
"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.": "ESRGAN 的圖塊重疊畫素。低值 = 可見接縫",
"Tile size for all SwinIR.": "適用所有 SwinIR 系演算法的圖塊尺寸",
"Tile overlap, in pixels for SwinIR. Low values = visible seam.": "SwinIR 的圖塊重疊畫素。低值 = 可見接縫",
"LDSR processing steps. Lower = faster": "LDSR 處理疊代步數。更低 = 更快",
"Upscaler for img2img": "圖生圖的放大演算法",
"Upscale latent space image when doing hires. fix": "做高解析度修復時也放大潛空間圖像",
"Face restoration": "面部修復",
"CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect": "CodeFormer 權重參數; 0 = 最大效果; 1 = 最小效果",
"Move face restoration model from VRAM into RAM after processing": "面部修復處理完成後將面部修復模型從顯存VRAM移至內存RAM",
"System": "系統",
"VRAM usage polls per second during generation. Set to 0 to disable.": "生成圖像時每秒輪詢顯存VRAM使用情況的次數。設定為 0 以禁用",
"Always print all generation info to standard output": "始終將所有生成資訊輸出到 standard output (一般為控制台)",
"Add a second progress bar to the console that shows progress for an entire job.": "向控制台加入第二個進度列,顯示整個作業的進度",
"Training": "訓練",
"Unload VAE and CLIP from VRAM when training": "訓練時從顯存VRAM中取消 VAE 和 CLIP 的載入",
"Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM.": "訓練時將 VAE 和 CLIP 從顯存VRAM移放到內存RAM節省顯存VRAM",
"Filename word regex": "檔案名用詞的正則表達式",
"Filename join string": "檔案名連接用字串",
"Number of repeats for a single input image per epoch; used only for displaying epoch number": "每個 epoch 中單個輸入圖像的重複次數; 僅用於顯示 epoch 數",
"Save an csv containing the loss to log directory every N steps, 0 to disable": "每 N 步儲存一個包含 loss 的CSV到日誌目錄0 表示禁用",
"Stable Diffusion": "Stable Diffusion",
"Checkpoints to cache in RAM": "快取在內存RAM中的模型權重存檔點",
"Hypernetwork strength": "Hypernetwork 強度",
"Apply color correction to img2img results to match original colors.": "對圖生圖結果套用顏色校正以匹配原始顏色",
"Save a copy of image before applying color correction to img2img results": "在對圖生圖結果套用顏色校正之前儲存圖像副本",
"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising).": "在進行圖生圖的時候,確切地執行滑塊指定的疊代步數(正常情況下更弱的去噪需要更少的疊代步數)",
"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply.": "在 K 採樣器中啟用量化以獲得更清晰,更清晰的結果。這可能會改變現有的隨機種子。需要重新啟動才能套用",
"Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention": "強調符:使用 (文字) 使模型更關注該文本,使用 [文字] 使其減少關注",
"Use old emphasis implementation. Can be useful to reproduce old seeds.": "使用舊的強調符實作。可用於復現舊隨機種子",
"Make K-diffusion samplers produce same images in a batch as when making a single image": "使 K-diffusion 採樣器批量生成與生成單個圖像時產出相同的圖像",
"Increase coherency by padding from the last comma within n tokens when using more than 75 tokens": "當使用超過 75 個 token 時,通過從 n 個 token 中的最後一個逗號填補來提高一致性",
"Filter NSFW content": "過濾成人內容",
"Stop At last layers of CLIP model": "在 CLIP 模型的最後哪一層停下",
"Interrogate Options": "反推提示詞選項",
"Interrogate: keep models in VRAM": "反推: 將模型儲存在顯存VRAM中",
"Interrogate: use artists from artists.csv": "反推: 使用 artists.csv 中的藝術家",
"Interrogate: include ranks of model tags matches in results (Has no effect on caption-based interrogators).": "反推: 在生成結果中包含與模型標籤相匹配的等級(對基於生成自然語言描述的反推沒有影響)",
"Interrogate: num_beams for BLIP": "反推: BLIP 的 num_beams",
"Interrogate: minimum description length (excluding artists, etc..)": "反推: 最小描述長度(不包括藝術家, 等…)",
"Interrogate: maximum description length": "反推: 最大描述長度",
"CLIP: maximum number of lines in text file (0 = No limit)": "CLIP 文本檔案中的最大行數0 = 無限制)",
"Interrogate: deepbooru score threshold": "反推: deepbooru 分數閾值",
"Interrogate: deepbooru sort alphabetically": "反推: deepbooru 按字母順序排序",
"use spaces for tags in deepbooru": "在 deepbooru 中為標籤使用空格",
"escape (\\) brackets in deepbooru (so they are used as literal brackets and not for emphasis)": "在 deepbooru 中使用轉義 (\\) 括號(因此它們用作文字括號而不是強調符號)",
"User interface": "使用者介面",
"Show progressbar": "顯示進度列",
"Show image creation progress every N sampling steps. Set 0 to disable.": "每 N 個採樣疊代步數顯示圖像生成進度。設定 0 禁用",
"Show previews of all images generated in a batch as a grid": "以網格的形式預覽所有批量生成出來的圖像",
"Show grid in results for web": "在網頁的結果中顯示概覽圖",
"Do not show any images in results for web": "不在網頁的結果中顯示任何圖像",
"Add model hash to generation information": "將模型的雜湊值加入到生成資訊",
"Add model name to generation information": "將模型名稱加入到生成資訊",
"When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint.": "當從文本讀取生成參數到 UI從 PNG 資訊或粘貼文本)時,不要更改選定的模型權重存檔點",
"Font for image grids that have text": "有文字的概覽圖使用的字體",
"Enable full page image viewer": "啟用整頁圖像檢視器",
"Show images zoomed in by default in full page image viewer": "在整頁圖像檢視器中預設放大顯示圖像",
"Show generation progress in window title.": "在視窗標題中顯示生成進度",
"Quicksettings list": "快速設定列表",
"Localization (requires restart)": "本地化(需要重新啟動)",
"Sampler parameters": "採樣器參數",
"Hide samplers in user interface (requires restart)": "在使用者介面中隱藏採樣器(需要重新啟動)",
"eta (noise multiplier) for DDIM": "DDIM 的 eta (噪聲乘數)",
"eta (noise multiplier) for ancestral samplers": "ancestral 採樣器的 eta (噪聲乘數)",
"img2img DDIM discretize": "圖生圖 DDIM 離散化",
"uniform": "均勻",
"quad": "二階",
"sigma churn": "sigma churn",
"sigma tmin": "最小(tmin) sigma",
"sigma noise": "sigma 噪聲",
"Eta noise seed delta": "Eta 噪聲種子偏移noise seed delta",
"Images Browser": "圖庫瀏覽器",
"Preload images at startup": "在啟動時預載圖像",
"Number of columns on the page": "每頁列數",
"Number of rows on the page": "每頁行數",
"Number of pictures displayed on each page": "每頁顯示的圖像數量",
"Minimum number of pages per load": "每次載入的最小頁數",
"Number of grids in each row": "每行顯示多少格",
"Wildcards": "萬用字元",
"Use same seed for all images": "為所有圖像使用同一個隨機種子",
"Request browser notifications": "請求瀏覽器通知",
"Download localization template": "下載本地化模板",
"Reload custom script bodies (No ui updates, No restart)": "重新載入自訂指令碼主體無UI更新無重啟",
"Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)": "重啟 Gradio 及刷新組件僅限自訂指令碼ui.pyJS 和 CSS",
"Prompt (press Ctrl+Enter or Alt+Enter to generate)": "提示詞(按 Ctrl+Enter 或 Alt+Enter 生成)",
"Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "反向提示詞(按 Ctrl+Enter 或 Alt+Enter 生成)",
"Add a random artist to the prompt.": "隨機加入一個藝術家到提示詞中",
"Read generation parameters from prompt or last generation if prompt is empty into user interface.": "從提示詞中讀取生成參數,如果提示詞為空,則讀取上一次的生成參數到使用者介面",
"Save style": "存儲為模板風格",
"Apply selected styles to current prompt": "將所選樣式套用於當前提示",
"Stop processing current image and continue processing.": "停止處理當前圖像並繼續處理下一個",
"Stop processing images and return any results accumulated so far.": "停止處理圖像並返回迄今為止累積的任何結果",
"Style to apply; styles have components for both positive and negative prompts and apply to both": "要套用的模版風格; 模版風格包含正向和反向提示詞,並套用於兩者",
"Do not do anything special": "什麼都不做",
"Which algorithm to use to produce the image": "使用哪種演算法生成圖像",
"Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral - 非常有創意,可以根據疊代步數獲得完全不同的圖像,將疊代步數設定為高於 30-40 不會有正面作用",
"Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit models - 最擅長內補繪製",
"Produce an image that can be tiled.": "生成可用於平舖的圖像",
"Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "使用兩步處理的時候以較小的解析度生成初步圖像,接著放大圖像,然後在不更改構圖的情況下改進其中的細節",
"Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "決定演算法對圖像內容的影響程度。設定 0 時,什麼都不會改變,而在 1 時,你將獲得不相關的圖像。值低於 1.0 時,處理的疊代步數將少於「採樣疊代步數」滑塊指定的步數",
"How many batches of images to create": "建立多少批次的圖像",
"How many image to create in a single batch": "每批建立多少圖像",
"Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "Classifier Free Guidance Scale - 圖像應在多大程度上服從提示詞 - 較低的值會產生更有創意的結果",
"A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "一個固定隨機數生成器輸出的值 — 以相同參數和隨機種子生成的圖像會得到相同的結果",
"Set seed to -1, which will cause a new random number to be used every time": "將隨機種子設定為-1則每次都會使用一個新的隨機數",
"Reuse seed from last generation, mostly useful if it was randomed": "重用上一次使用的隨機種子,如果想要固定結果就會很有用",
"Seed of a different picture to be mixed into the generation.": "將要參與生成的另一張圖的隨機種子",
"How strong of a variation to produce. At 0, there will be no effect. At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "想要產生多強烈的變化。設為 0 時,將沒有效果。設為 1 時你將獲得完全產自差異隨機種子的圖像ancestral 採樣器除外,你只是單純地生成了一些東西)",
"Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "嘗試生成與在指定解析度下使用相同隨機種子生成的圖像相似的圖片",
"This text is used to rotate the feature space of the imgs embs": "此文本用於旋轉圖集 embeddings 的特徵空間",
"Separate values for X axis using commas.": "使用逗號分隔 X 軸的值",
"Separate values for Y axis using commas.": "使用逗號分隔 Y 軸的值",
"Write image to a directory (default - log/images) and generation parameters into csv file.": "將圖像寫入目錄(預設 — log/images並將生成參數寫入CSV檔案",
"Open images output directory": "打開圖像輸出目錄",
"How much to blur the mask before processing, in pixels.": "處理前要對蒙版進行多強的模糊,以畫素為單位",
"What to put inside the masked area before processing it with Stable Diffusion.": "在使用 Stable Diffusion 處理蒙版區域之前要在蒙版區域內放置什麼",
"fill it with colors of the image": "用圖像的顏色填充它",
"keep whatever was there originally": "保留原來的内容",
"fill it with latent space noise": "用潛空間的噪聲填充它",
"fill it with latent space zeroes": "用潛空間的零填充它",
"Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "將蒙版區域放大到目標解析度,做內補繪製,縮小後粘貼到原始圖像中",
"Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "將圖像大小調整為目標解析度。除非高度和寬度匹配,否則你將獲得不正確的縱橫比",
"Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "調整圖像大小,使整個目標解析度都被圖像填充。裁剪多出來的部分",
"Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "調整圖像大小,使整個圖像在目標解析度內。用圖像的顏色填充空白區域",
"How many times to repeat processing an image and using it as input for the next iteration": "重複處理圖像並用作下次疊代輸入的次數",
"In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.": "在回送模式下,在每個循環中,去噪強度都會乘以該值。<1 表示減少多樣性,因此你的這一組圖將集中在固定的圖像上。>1 意味著增加多樣性,因此你的這一組圖將變得越來越混亂",
"For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "使用 SD 放大時,圖塊之間應該有多少畫素重疊。圖塊之間需要重疊才可以讓它們在合併回一張圖像時,沒有清晰可見的接縫",
"A directory on the same machine where the server is running.": "與伺服器主機上的目錄",
"Leave blank to save images to the default path.": "留空以將圖像儲存到預設路徑",
"Result = A * (1 - M) + B * M": "結果 = A * (1 - M) + B * M",
"Result = A + (B - C) * M": "結果 = A + (B - C) * M",
"1st and last digit must be 1. ex:'1, 2, 1'": "第一個和最後一個數字必須是 1。例'1, 2, 1'",
"how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.": "訓練應該多快。低值將需要更長的時間來訓練,高值可能無法收斂(無法產生準確的結果)以及/也許可能會破壞 embedding如果你在訓練資訊文字方塊中看到 Loss: nan 就會發生這種情況。如果發生這種情況,你需要從較舊的未損壞的備份手動恢復 embedding\n\n你可以使用以下語法設定單個數值或多個學習率\n\n 率1:步限1, 率2:步限2, …\n\n如 0.005:100, 1e-3:1000, 1e-5\n\n即前 100 步將以 0.005 的速率訓練,接著直到 1000 步為止以 1e-3 訓練,然後剩餘所有步以 1e-5 訓練",
"Path to directory with input images": "帶有輸入圖像的路徑",
"Path to directory where to write outputs": "進行輸出的路徑",
"Input images directory": "輸入圖像目錄",
"Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下標籤定義如何選擇圖像的檔案名: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp] 預設請留空",
"If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.": "如果啟用此選項,浮水印將不會加入到生成出來的圖像中。警告:如果你不加入浮水印,你的行為可能是不符合道德操守的",
"Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp]; leave empty for default.": "使用以下標籤定義如何選擇圖像和概覽圖的子目錄: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [job_timestamp] 預設請留空",
"Restore low quality faces using GFPGAN neural network": "使用 GFPGAN 神經網路修復低品質面部",
"This regular expression will be used extract words from filename, and they will be joined using the option below into label text used for training. Leave empty to keep filename text as it is.": "此正則表達式將用於從檔案名中提取單詞,並將使用以下選項將它們接合到用於訓練的標籤文本中。留空以保持檔案名文本不變",
"This string will be used to join split words into a single line if the option above is enabled.": "如果啟用了上述選項,則此處的字元會用於將拆分的單詞接合為同一行",
"List of setting names, separated by commas, for settings that should go to the quick access bar at the top, rather than the usual setting tab. See modules/shared.py for setting names. Requires restarting to apply.": "設定名稱列表,以逗號分隔,設定應轉到頂部的快速存取列,而不是通常的設定頁籤。有關設定名稱,請參見 modules/shared.py。需要重新啟動才能套用",
"If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.": "如果這個值不為零,它將被加入到隨機種子中,並在使用帶有 Eta 的採樣器時用於初始化隨機噪聲。你可以使用它來產生更多的圖像變化,或者你可以使用它來模仿其他軟體生成的圖像,如果你知道你在做什麼",
"Enable Autocomplete": "開啟Tag補全",
"Allowed categories for random artists selection when using the Roll button": "使用抽選藝術家按鈕時將會隨機的藝術家類別",
"Roll three": "抽三位出來",
"Generate forever": "不停地生成",
"Cancel generate forever": "取消不停地生成"
}

modules/api/api.py
View File

@ -1,29 +1,40 @@
from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI
import base64
import io
import time
import uvicorn
from gradio.processing_utils import decode_base64_to_file, decode_base64_to_image
from fastapi import APIRouter, Depends, HTTPException
import modules.shared as shared
from modules import devices
from modules.api.models import *
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.sd_samplers import all_samplers
from modules.extras import run_pnginfo
import modules.shared as shared
import uvicorn
from fastapi import Body, APIRouter, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field, Json
from typing import List
import json
import io
import base64
from PIL import Image
from modules.extras import run_extras, run_pnginfo
def upscaler_to_index(name: str):
try:
return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
except:
raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}")
sampler_to_index = lambda name: next(filter(lambda row: name.lower() == row[1].name.lower(), enumerate(all_samplers)), None)
class TextToImageResponse(BaseModel):
images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: Json
info: Json
class ImageToImageResponse(BaseModel):
images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: Json
info: Json
def setUpscalers(req: dict):
reqDict = vars(req)
reqDict['extras_upscaler_1'] = upscaler_to_index(req.upscaler_1)
reqDict['extras_upscaler_2'] = upscaler_to_index(req.upscaler_2)
reqDict.pop('upscaler_1')
reqDict.pop('upscaler_2')
return reqDict
def encode_pil_to_base64(image):
buffer = io.BytesIO()
image.save(buffer, format="png")
return base64.b64encode(buffer.getvalue())
class Api:
@ -31,16 +42,13 @@ class Api:
self.router = APIRouter()
self.app = app
self.queue_lock = queue_lock
self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"])
self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"])
def __base64_to_image(self, base64_string):
# if has a comma, deal with prefix
if "," in base64_string:
base64_string = base64_string.split(",")[1]
imgdata = base64.b64decode(base64_string)
# convert base64 to PIL image
return Image.open(io.BytesIO(imgdata))
self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
self.app.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
self.app.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
self.app.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
self.app.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
self.app.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
sampler_index = sampler_to_index(txt2imgreq.sampler_index)
@ -57,18 +65,17 @@ class Api:
)
p = StableDiffusionProcessingTxt2Img(**vars(populate))
# Override object param
shared.state.begin()
with self.queue_lock:
processed = process_images(p)
b64images = []
for i in processed.images:
buffer = io.BytesIO()
i.save(buffer, format="png")
b64images.append(base64.b64encode(buffer.getvalue()))
return TextToImageResponse(images=b64images, parameters=json.dumps(vars(txt2imgreq)), info=processed.js())
shared.state.end()
b64images = list(map(encode_pil_to_base64, processed.images))
return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI):
sampler_index = sampler_to_index(img2imgreq.sampler_index)
@ -83,7 +90,7 @@ class Api:
mask = img2imgreq.mask
if mask:
mask = self.__base64_to_image(mask)
mask = decode_base64_to_image(mask)
populate = img2imgreq.copy(update={ # Override __init__ params
@ -98,31 +105,90 @@ class Api:
imgs = []
for img in init_images:
img = self.__base64_to_image(img)
img = decode_base64_to_image(img)
imgs = [img] * p.batch_size
p.init_images = imgs
# Override object param
shared.state.begin()
with self.queue_lock:
processed = process_images(p)
b64images = []
for i in processed.images:
buffer = io.BytesIO()
i.save(buffer, format="png")
b64images.append(base64.b64encode(buffer.getvalue()))
shared.state.end()
b64images = list(map(encode_pil_to_base64, processed.images))
if (not img2imgreq.include_init_images):
img2imgreq.init_images = None
img2imgreq.mask = None
return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=processed.js())
return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())
def extrasapi(self):
raise NotImplementedError
def extras_single_image_api(self, req: ExtrasSingleImageRequest):
reqDict = setUpscalers(req)
def pnginfoapi(self):
raise NotImplementedError
reqDict['image'] = decode_base64_to_image(reqDict['image'])
with self.queue_lock:
result = run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", **reqDict)
return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])
def extras_batch_images_api(self, req: ExtrasBatchImagesRequest):
reqDict = setUpscalers(req)
def prepareFiles(file):
file = decode_base64_to_file(file.data, file_path=file.name)
file.orig_name = file.name
return file
reqDict['image_folder'] = list(map(prepareFiles, reqDict['imageList']))
reqDict.pop('imageList')
with self.queue_lock:
result = run_extras(extras_mode=1, image="", input_dir="", output_dir="", **reqDict)
return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
def pnginfoapi(self, req: PNGInfoRequest):
if(not req.image.strip()):
return PNGInfoResponse(info="")
result = run_pnginfo(decode_base64_to_image(req.image.strip()))
return PNGInfoResponse(info=result[1])
def progressapi(self, req: ProgressRequest = Depends()):
# copy from check_progress_call of ui.py
if shared.state.job_count == 0:
return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict())
# avoid division by zero
progress = 0.01
if shared.state.job_count > 0:
progress += shared.state.job_no / shared.state.job_count
if shared.state.sampling_steps > 0:
progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
time_since_start = time.time() - shared.state.time_start
eta = (time_since_start/progress)
eta_relative = eta-time_since_start
progress = min(progress, 1)
current_image = None
if shared.state.current_image and not req.skip_current_image:
current_image = encode_pil_to_base64(shared.state.current_image)
return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image)
def interruptapi(self):
shared.state.interrupt()
return {}
def launch(self, server_name, port):
self.app.include_router(self.router)
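
The endpoints registered above can be exercised with any HTTP client once the server is running with the API enabled. Below is a minimal sketch, not part of this commit, assuming a local instance at http://127.0.0.1:7860 and the third-party requests package; it posts a txt2img request, decodes the base64-encoded PNGs from the TextToImageResponse, and polls the new progress endpoint.

import base64
import io

import requests
from PIL import Image

BASE_URL = "http://127.0.0.1:7860"  # assumed local instance with the API enabled

# Minimal txt2img payload; field names follow StableDiffusionTxt2ImgProcessingAPI.
payload = {
    "prompt": "a photo of a cat",
    "steps": 20,
    "sampler_index": "Euler",
}

resp = requests.post(f"{BASE_URL}/sdapi/v1/txt2img", json=payload)
resp.raise_for_status()
data = resp.json()

# TextToImageResponse.images is a list of base64-encoded PNG strings.
for i, b64img in enumerate(data["images"]):
    Image.open(io.BytesIO(base64.b64decode(b64img))).save(f"txt2img_{i}.png")

# The progress endpoint reports progress, eta_relative and a state snapshot.
print(requests.get(f"{BASE_URL}/sdapi/v1/progress").json()["progress"])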

modules/api/models.py
View File

@ -1,10 +1,11 @@
from array import array
from inflection import underscore
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, create_model
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
import inspect
from click import prompt
from pydantic import BaseModel, Field, create_model
from typing import Any, Optional
from typing_extensions import Literal
from inflection import underscore
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
from modules.shared import sd_upscalers
API_NOT_ALLOWED = [
"self",
@ -106,3 +107,61 @@ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
StableDiffusionProcessingImg2Img,
[{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}]
).generate_model()
class TextToImageResponse(BaseModel):
images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str
class ImageToImageResponse(BaseModel):
images: list[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
parameters: dict
info: str
class ExtrasBaseRequest(BaseModel):
resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.")
show_extras_results: bool = Field(default=True, title="Show results", description="Should the backend return the generated image?")
gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.")
upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
class ExtraBaseResponse(BaseModel):
html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")
class ExtrasSingleImageRequest(ExtrasBaseRequest):
image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
class ExtrasSingleImageResponse(ExtraBaseResponse):
image: str = Field(default=None, title="Image", description="The generated image in base64 format.")
class FileData(BaseModel):
data: str = Field(title="File data", description="Base64 representation of the file")
name: str = Field(title="File name")
class ExtrasBatchImagesRequest(ExtrasBaseRequest):
imageList: list[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
class ExtrasBatchImagesResponse(ExtraBaseResponse):
images: list[str] = Field(title="Images", description="The generated images in base64 format.")
class PNGInfoRequest(BaseModel):
image: str = Field(title="Image", description="The base64 encoded PNG image")
class PNGInfoResponse(BaseModel):
info: str = Field(title="Image info", description="A string with all the info the image had")
class ProgressRequest(BaseModel):
skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
class ProgressResponse(BaseModel):
progress: float = Field(title="Progress", description="The progress with a range of 0 to 1")
eta_relative: float = Field(title="ETA in secs")
state: dict = Field(title="State", description="The current state snapshot")
current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
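
As a companion sketch under the same assumptions as above (a local server with the API enabled and the requests package), the extras models map to a request against /sdapi/v1/extra-single-image like this:

import base64

import requests

BASE_URL = "http://127.0.0.1:7860"  # assumed local instance with the API enabled

# ExtrasSingleImageRequest expects the input image as a base64 string.
with open("input.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "image": image_b64,
    "resize_mode": 0,         # 0 = upscale by upscaling_resize
    "upscaling_resize": 2,    # 2x upscale
    "upscaler_1": "Lanczos",  # must be a name from shared.sd_upscalers
}

resp = requests.post(f"{BASE_URL}/sdapi/v1/extra-single-image", json=payload)
resp.raise_for_status()
result = resp.json()

# ExtrasSingleImageResponse returns the processed image plus the HTML info string.
with open("upscaled.png", "wb") as f:
    f.write(base64.b64decode(result["image"]))
print(result["html_info"])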

83
modules/extensions.py Normal file
View File

@ -0,0 +1,83 @@
import os
import sys
import traceback
import git
from modules import paths, shared
extensions = []
extensions_dir = os.path.join(paths.script_path, "extensions")
def active():
return [x for x in extensions if x.enabled]
class Extension:
def __init__(self, name, path, enabled=True):
self.name = name
self.path = path
self.enabled = enabled
self.status = ''
self.can_update = False
repo = None
try:
if os.path.exists(os.path.join(path, ".git")):
repo = git.Repo(path)
except Exception:
print(f"Error reading github repository info from {path}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
if repo is None or repo.bare:
self.remote = None
else:
self.remote = next(repo.remote().urls, None)
self.status = 'unknown'
def list_files(self, subdir, extension):
from modules import scripts
dirpath = os.path.join(self.path, subdir)
if not os.path.isdir(dirpath):
return []
res = []
for filename in sorted(os.listdir(dirpath)):
res.append(scripts.ScriptFile(self.path, filename, os.path.join(dirpath, filename)))
res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
return res
def check_updates(self):
repo = git.Repo(self.path)
for fetch in repo.remote().fetch("--dry-run"):
if fetch.flags != fetch.HEAD_UPTODATE:
self.can_update = True
self.status = "behind"
return
self.can_update = False
self.status = "latest"
def pull(self):
repo = git.Repo(self.path)
repo.remotes.origin.pull()
def list_extensions():
extensions.clear()
if not os.path.isdir(extensions_dir):
return
for dirname in sorted(os.listdir(extensions_dir)):
path = os.path.join(extensions_dir, dirname)
if not os.path.isdir(path):
continue
extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions)
extensions.append(extension)
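
A short sketch of how the new module is consumed (hypothetical caller code, not part of this commit; it relies only on list_extensions, active, list_files and check_updates as defined above):

from modules import extensions

# Scan the extensions/ directory and populate extensions[], honouring opts.disabled_extensions.
extensions.list_extensions()

for ext in extensions.active():
    # Gather the Python scripts an enabled extension ships in its scripts/ subdirectory.
    scripts = ext.list_files("scripts", ".py")
    print(ext.name, ext.remote, [s.path for s in scripts])

    # check_updates() does a dry-run fetch and sets can_update / status accordingly.
    if ext.remote is not None:
        ext.check_updates()
        if ext.can_update:
            print(f"{ext.name} is behind its remote ({ext.status})")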

modules/extras.py
View File

@ -1,3 +1,4 @@
from __future__ import annotations
import math
import os
@ -7,6 +8,10 @@ from PIL import Image
import torch
import tqdm
from typing import Callable, List, OrderedDict, Tuple
from functools import partial
from dataclasses import dataclass
from modules import processing, shared, images, devices, sd_models
from modules.shared import opts
import modules.gfpgan_model
@ -17,10 +22,38 @@ import piexif.helper
import gradio as gr
cached_images = {}
class LruCache(OrderedDict):
@dataclass(frozen=True)
class Key:
image_hash: int
info_hash: int
args_hash: int
@dataclass
class Value:
image: Image.Image
info: str
def __init__(self, max_size: int = 5, *args, **kwargs):
super().__init__(*args, **kwargs)
self._max_size = max_size
def get(self, key: LruCache.Key) -> LruCache.Value:
ret = super().get(key)
if ret is not None:
self.move_to_end(key) # Move to end of eviction list
return ret
def put(self, key: LruCache.Key, value: LruCache.Value) -> None:
self[key] = value
while len(self) > self._max_size:
self.popitem(last=False)
def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility):
cached_images: LruCache = LruCache(max_size=5)
def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool):
devices.torch_gc()
imageArr = []
@ -39,7 +72,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
if input_dir == '':
return outputs, "Please select an input directory.", ''
image_list = [file for file in [os.path.join(input_dir, x) for x in sorted(os.listdir(input_dir))] if os.path.isfile(file)]
image_list = shared.listfiles(input_dir)
for img in image_list:
try:
image = Image.open(img)
@ -56,6 +89,90 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
else:
outpath = opts.outdir_samples or opts.outdir_extras_samples
# Extra operation definitions
def run_gfpgan(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
restored_img = modules.gfpgan_model.gfpgan_fix_faces(np.array(image, dtype=np.uint8))
res = Image.fromarray(restored_img)
if gfpgan_visibility < 1.0:
res = Image.blend(image, res, gfpgan_visibility)
info += f"GFPGAN visibility:{round(gfpgan_visibility, 2)}\n"
return (res, info)
def run_codeformer(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
restored_img = modules.codeformer_model.codeformer.restore(np.array(image, dtype=np.uint8), w=codeformer_weight)
res = Image.fromarray(restored_img)
if codeformer_visibility < 1.0:
res = Image.blend(image, res, codeformer_visibility)
info += f"CodeFormer w: {round(codeformer_weight, 2)}, CodeFormer visibility:{round(codeformer_visibility, 2)}\n"
return (res, info)
def upscale(image, scaler_index, resize, mode, resize_w, resize_h, crop):
upscaler = shared.sd_upscalers[scaler_index]
res = upscaler.scaler.upscale(image, resize, upscaler.data_path)
if mode == 1 and crop:
cropped = Image.new("RGB", (resize_w, resize_h))
cropped.paste(res, box=(resize_w // 2 - res.width // 2, resize_h // 2 - res.height // 2))
res = cropped
return res
def run_prepare_crop(image: Image.Image, info: str) -> Tuple[Image.Image, str]:
# Actual crop happens in run_upscalers_blend, this just sets upscaling_resize and adds info text
nonlocal upscaling_resize
if resize_mode == 1:
upscaling_resize = max(upscaling_resize_w/image.width, upscaling_resize_h/image.height)
crop_info = " (crop)" if upscaling_crop else ""
info += f"Resize to: {upscaling_resize_w:g}x{upscaling_resize_h:g}{crop_info}\n"
return (image, info)
@dataclass
class UpscaleParams:
upscaler_idx: int
blend_alpha: float
def run_upscalers_blend(params: List[UpscaleParams], image: Image.Image, info: str) -> Tuple[Image.Image, str]:
blended_result: Image.Image = None
for upscaler in params:
upscale_args = (upscaler.upscaler_idx, upscaling_resize, resize_mode,
upscaling_resize_w, upscaling_resize_h, upscaling_crop)
cache_key = LruCache.Key(image_hash=hash(np.array(image.getdata()).tobytes()),
info_hash=hash(info),
args_hash=hash((upscale_args, upscale_first)))
cached_entry = cached_images.get(cache_key)
if cached_entry is None:
res = upscale(image, *upscale_args)
info += f"Upscale: {round(upscaling_resize, 3)}, visibility: {upscaler.blend_alpha}, model:{shared.sd_upscalers[upscaler.upscaler_idx].name}\n"
cached_images.put(cache_key, LruCache.Value(image=res, info=info))
else:
res, info = cached_entry.image, cached_entry.info
if blended_result is None:
blended_result = res
else:
blended_result = Image.blend(blended_result, res, upscaler.blend_alpha)
return (blended_result, info)
# Build a list of operations to run
facefix_ops: List[Callable] = []
facefix_ops += [run_gfpgan] if gfpgan_visibility > 0 else []
facefix_ops += [run_codeformer] if codeformer_visibility > 0 else []
upscale_ops: List[Callable] = []
upscale_ops += [run_prepare_crop] if resize_mode == 1 else []
if upscaling_resize != 0:
step_params: List[UpscaleParams] = []
step_params.append(UpscaleParams(upscaler_idx=extras_upscaler_1, blend_alpha=1.0))
if extras_upscaler_2 != 0 and extras_upscaler_2_visibility > 0:
step_params.append(UpscaleParams(upscaler_idx=extras_upscaler_2, blend_alpha=extras_upscaler_2_visibility))
upscale_ops.append(partial(run_upscalers_blend, step_params))
extras_ops: List[Callable] = (upscale_ops + facefix_ops) if upscale_first else (facefix_ops + upscale_ops)
for image, image_name in zip(imageArr, imageNameArr):
if image is None:
@ -64,63 +181,9 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
image = image.convert("RGB")
info = ""
if gfpgan_visibility > 0:
restored_img = modules.gfpgan_model.gfpgan_fix_faces(np.array(image, dtype=np.uint8))
res = Image.fromarray(restored_img)
if gfpgan_visibility < 1.0:
res = Image.blend(image, res, gfpgan_visibility)
info += f"GFPGAN visibility:{round(gfpgan_visibility, 2)}\n"
image = res
if codeformer_visibility > 0:
restored_img = modules.codeformer_model.codeformer.restore(np.array(image, dtype=np.uint8), w=codeformer_weight)
res = Image.fromarray(restored_img)
if codeformer_visibility < 1.0:
res = Image.blend(image, res, codeformer_visibility)
info += f"CodeFormer w: {round(codeformer_weight, 2)}, CodeFormer visibility:{round(codeformer_visibility, 2)}\n"
image = res
if resize_mode == 1:
upscaling_resize = max(upscaling_resize_w/image.width, upscaling_resize_h/image.height)
crop_info = " (crop)" if upscaling_crop else ""
info += f"Resize to: {upscaling_resize_w:g}x{upscaling_resize_h:g}{crop_info}\n"
if upscaling_resize != 1.0:
def upscale(image, scaler_index, resize, mode, resize_w, resize_h, crop):
small = image.crop((image.width // 2, image.height // 2, image.width // 2 + 10, image.height // 2 + 10))
pixels = tuple(np.array(small).flatten().tolist())
key = (resize, scaler_index, image.width, image.height, gfpgan_visibility, codeformer_visibility, codeformer_weight,
resize_mode, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop) + pixels
c = cached_images.get(key)
if c is None:
upscaler = shared.sd_upscalers[scaler_index]
c = upscaler.scaler.upscale(image, resize, upscaler.data_path)
if mode == 1 and crop:
cropped = Image.new("RGB", (resize_w, resize_h))
cropped.paste(c, box=(resize_w // 2 - c.width // 2, resize_h // 2 - c.height // 2))
c = cropped
cached_images[key] = c
return c
info += f"Upscale: {round(upscaling_resize, 3)}, model:{shared.sd_upscalers[extras_upscaler_1].name}\n"
res = upscale(image, extras_upscaler_1, upscaling_resize, resize_mode, upscaling_resize_w, upscaling_resize_h, upscaling_crop)
if extras_upscaler_2 != 0 and extras_upscaler_2_visibility > 0:
res2 = upscale(image, extras_upscaler_2, upscaling_resize, resize_mode, upscaling_resize_w, upscaling_resize_h, upscaling_crop)
info += f"Upscale: {round(upscaling_resize, 3)}, visibility: {round(extras_upscaler_2_visibility, 3)}, model:{shared.sd_upscalers[extras_upscaler_2].name}\n"
res = Image.blend(res, res2, extras_upscaler_2_visibility)
image = res
while len(cached_images) > 2:
del cached_images[next(iter(cached_images.keys()))]
# Run each operation on each image
for op in extras_ops:
image, info = op(image, info)
if opts.use_original_name_batch and image_name != None:
basename = os.path.splitext(os.path.basename(image_name))[0]
@ -141,6 +204,9 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_
return outputs, plaintext_to_html(info), ''
def clear_cache():
cached_images.clear()
def run_pnginfo(image):
if image is None:
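
The LruCache introduced at the top of this file replaces the old unbounded cached_images dict; a small behavioural sketch (assuming modules.extras is importable, and using 1x1 images for brevity):

from PIL import Image

from modules.extras import LruCache

cache = LruCache(max_size=2)
img = Image.new("RGB", (1, 1))

def key(n: int) -> LruCache.Key:
    # Stand-ins for the image/info/args hashes computed in run_upscalers_blend.
    return LruCache.Key(image_hash=n, info_hash=n, args_hash=n)

cache.put(key(1), LruCache.Value(image=img, info="first"))
cache.put(key(2), LruCache.Value(image=img, info="second"))
assert cache.get(key(1)) is not None   # get() moves the entry to the end of the eviction order

cache.put(key(3), LruCache.Value(image=img, info="third"))
assert cache.get(key(2)) is None       # least recently used entry was evicted at max_size=2
assert cache.get(key(1)).info == "first"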

modules/generation_parameters_copypaste.py
View File

@ -1,14 +1,25 @@
import base64
import io
import os
import re
import gradio as gr
from modules.shared import script_path
from modules import shared
import tempfile
from PIL import Image
re_param_code = r'\s*([\w ]+):\s*("(?:\\|\"|[^\"])+"|[^,]*)(?:,|$)'
re_param = re.compile(re_param_code)
re_params = re.compile(r"^(?:" + re_param_code + "){3,}$")
re_imagesize = re.compile(r"^(\d+)x(\d+)$")
type_of_gr_update = type(gr.update())
paste_fields = {}
bind_list = []
def reset():
paste_fields.clear()
bind_list.clear()
def quote(text):
@ -20,6 +31,111 @@ def quote(text):
text = text.replace('"', '\\"')
return f'"{text}"'
def image_from_url_text(filedata):
if type(filedata) == dict and filedata["is_file"]:
filename = filedata["name"]
tempdir = os.path.normpath(tempfile.gettempdir())
normfn = os.path.normpath(filename)
assert normfn.startswith(tempdir), 'trying to open image file not in temporary directory'
return Image.open(filename)
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def add_paste_fields(tabname, init_img, fields):
paste_fields[tabname] = {"init_img": init_img, "fields": fields}
# backwards compatibility for existing extensions
import modules.ui
if tabname == 'txt2img':
modules.ui.txt2img_paste_fields = fields
elif tabname == 'img2img':
modules.ui.img2img_paste_fields = fields
def integrate_settings_paste_fields(component_dict):
from modules import ui
settings_map = {
'sd_hypernetwork': 'Hypernet',
'sd_hypernetwork_strength': 'Hypernet strength',
'CLIP_stop_at_last_layers': 'Clip skip',
'sd_model_checkpoint': 'Model hash',
}
settings_paste_fields = [
(component_dict[k], lambda d, k=k, v=v: ui.apply_setting(k, d.get(v, None)))
for k, v in settings_map.items()
]
for tabname, info in paste_fields.items():
if info["fields"] is not None:
info["fields"] += settings_paste_fields
def create_buttons(tabs_list):
buttons = {}
for tab in tabs_list:
buttons[tab] = gr.Button(f"Send to {tab}")
return buttons
# if send_generate_info is a tab name, it means generate_info comes from the params fields of that tab
def bind_buttons(buttons, send_image, send_generate_info):
bind_list.append([buttons, send_image, send_generate_info])
def run_bind():
for buttons, send_image, send_generate_info in bind_list:
for tab in buttons:
button = buttons[tab]
if send_image and paste_fields[tab]["init_img"]:
if type(send_image) == gr.Gallery:
button.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery",
inputs=[send_image],
outputs=[paste_fields[tab]["init_img"]],
)
else:
button.click(
fn=lambda x: x,
inputs=[send_image],
outputs=[paste_fields[tab]["init_img"]],
)
if send_generate_info and paste_fields[tab]["fields"] is not None:
if send_generate_info in paste_fields:
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Size-1', 'Size-2'] + (["Seed"] if shared.opts.send_seed else [])
button.click(
fn=lambda *x: x,
inputs=[field for field, name in paste_fields[send_generate_info]["fields"] if name in paste_field_names],
outputs=[field for field, name in paste_fields[tab]["fields"] if name in paste_field_names],
)
else:
connect_paste(button, paste_fields[tab]["fields"], send_generate_info)
button.click(
fn=None,
_js=f"switch_to_{tab}",
inputs=None,
outputs=None,
)
def parse_generation_parameters(x: str):
"""parses generation parameters string, the one you see in text field under the picture in UI:
```
@ -68,7 +184,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
return res
def connect_paste(button, paste_fields, input_comp, js=None):
def connect_paste(button, paste_fields, input_comp, jsfunc=None):
def paste_func(prompt):
if not prompt and not shared.cmd_opts.hide_ui_dir_config:
filename = os.path.join(script_path, "params.txt")
@ -106,7 +222,9 @@ def connect_paste(button, paste_fields, input_comp, js=None):
button.click(
fn=paste_func,
_js=js,
_js=jsfunc,
inputs=[input_comp],
outputs=[x[0] for x in paste_fields],
)
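
A usage sketch for the parsing side of this module (assuming the parameter key names below, which match the paste_field_names list above): parse_generation_parameters splits an infotext block into prompt, negative prompt and the comma-separated key: value pairs matched by re_param, with WxH sizes split into Size-1/Size-2 by re_imagesize.

from modules.generation_parameters_copypaste import parse_generation_parameters

# Infotext in the format shown under generated images in the UI.
infotext = """a photo of a cat
Negative prompt: blurry
Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512"""

params = parse_generation_parameters(infotext)
print(params["Prompt"], "|", params["Negative prompt"])
print(params["Steps"], params["Sampler"], params["Size-1"], params["Size-2"])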

modules/hypernetworks/hypernetwork.py
View File

@ -25,6 +25,7 @@ from statistics import stdev, mean
class HypernetworkModule(torch.nn.Module):
multiplier = 1.0
activation_dict = {
"linear": torch.nn.Identity,
"relu": torch.nn.ReLU,
"leakyrelu": torch.nn.LeakyReLU,
"elu": torch.nn.ELU,
@ -208,13 +209,16 @@ def list_hypernetworks(path):
res = {}
for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
name = os.path.splitext(os.path.basename(filename))[0]
res[name] = filename
# Prevent a hypothetical "None.pt" from being listed.
if name != "None":
res[name] = filename
return res
def load_hypernetwork(filename):
path = shared.hypernetworks.get(filename, None)
if path is not None:
# Prevent any file named "None.pt" from being loaded.
if path is not None and filename != "None":
print(f"Loading hypernetwork {filename}")
try:
shared.loaded_hypernetwork = Hypernetwork()
@ -331,7 +335,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
# images allows training previews to have infotext. Importing it at the top causes a circular import problem.
from modules import images
assert hypernetwork_name, 'hypernetwork not selected'
save_hypernetwork_every = save_hypernetwork_every or 0
create_image_every = create_image_every or 0
textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
path = shared.hypernetworks.get(hypernetwork_name, None)
shared.loaded_hypernetwork = Hypernetwork()
@ -357,18 +363,25 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
else:
images_dir = None
hypernetwork = shared.loaded_hypernetwork
checkpoint = sd_models.select_checkpoint()
ititial_step = hypernetwork.step or 0
if ititial_step >= steps:
shared.state.textinfo = f"Model has already been trained beyond specified max steps"
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
if unload:
shared.sd_model.cond_stage_model.to(devices.cpu)
shared.sd_model.first_stage_model.to(devices.cpu)
hypernetwork = shared.loaded_hypernetwork
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
size = len(ds.indexes)
loss_dict = defaultdict(lambda : deque(maxlen = 1024))
losses = torch.zeros((size,))
@ -376,20 +389,18 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
previous_mean_loss = 0
print("Mean loss of {} elements".format(size))
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
ititial_step = hypernetwork.step or 0
if ititial_step > steps:
return hypernetwork, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
weights = hypernetwork.weights()
for weight in weights:
weight.requires_grad = True
# if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
steps_without_grad = 0
last_saved_file = "<none>"
last_saved_image = "<none>"
forced_filename = "<none>"
pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step)
for i, entries in pbar:
hypernetwork.step = i + ititial_step
@ -428,6 +439,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
optimizer.step()
steps_done = hypernetwork.step + 1
if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
raise RuntimeError("Loss diverged.")
@ -438,19 +451,19 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
dataset_loss_info = f"dataset loss:{mean(previous_mean_losses):.3f}" + u"\u00B1" + f"({std / (len(previous_mean_losses) ** 0.5):.3f})"
pbar.set_description(dataset_loss_info)
if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0:
if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
# Before saving, change name to match current checkpoint.
hypernetwork.name = f'{hypernetwork_name}-{hypernetwork.step}'
last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork.name}.pt')
hypernetwork.save(last_saved_file)
hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
"loss": f"{previous_mean_loss:.7f}",
"learn_rate": scheduler.learn_rate
})
if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0:
forced_filename = f'{hypernetwork_name}-{hypernetwork.step}'
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{hypernetwork_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
optimizer.zero_grad()
@ -503,13 +516,23 @@ Last saved image: {html.escape(last_saved_image)}<br/>
"""
report_statistics(loss_dict)
checkpoint = sd_models.select_checkpoint()
hypernetwork.sd_checkpoint = checkpoint.hash
hypernetwork.sd_checkpoint_name = checkpoint.model_name
# Before saving for the last time, change name back to the base name (as opposed to the save_hypernetwork_every step-suffixed naming convention).
hypernetwork.name = hypernetwork_name
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork.name}.pt')
hypernetwork.save(filename)
filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
return hypernetwork, filename
def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
old_hypernetwork_name = hypernetwork.name
old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None
try:
hypernetwork.sd_checkpoint = checkpoint.hash
hypernetwork.sd_checkpoint_name = checkpoint.model_name
hypernetwork.name = hypernetwork_name
hypernetwork.save(filename)
except:
hypernetwork.sd_checkpoint = old_sd_checkpoint
hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
hypernetwork.name = old_hypernetwork_name
raise

View File

@ -8,7 +8,8 @@ import modules.textual_inversion.textual_inversion
from modules import devices, sd_hijack, shared
from modules.hypernetworks import hypernetwork
keys = list(hypernetwork.HypernetworkModule.activation_dict.keys())
not_available = ["hardswish", "multiheadattention"]
keys = ["linear"] + list(x for x in hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
# Remove illegal characters from name.

View File

@ -300,8 +300,8 @@ class FilenameGenerator:
'seed': lambda self: self.seed if self.seed is not None else '',
'steps': lambda self: self.p and self.p.steps,
'cfg': lambda self: self.p and self.p.cfg_scale,
'width': lambda self: self.p and self.p.width,
'height': lambda self: self.p and self.p.height,
'width': lambda self: self.image.width,
'height': lambda self: self.image.height,
'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
'sampler': lambda self: self.p and sanitize_filename_part(sd_samplers.samplers[self.p.sampler_index].name, replace_spaces=False),
'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
@ -315,10 +315,11 @@ class FilenameGenerator:
}
default_time_format = '%Y%m%d%H%M%S'
def __init__(self, p, seed, prompt):
def __init__(self, p, seed, prompt, image):
self.p = p
self.seed = seed
self.prompt = prompt
self.image = image
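The width/height entries above now resolve from the saved image rather than from the processing object, so filename patterns reflect the actual output size (after highres fix or upscaling, for example). Below is a minimal, self-contained sketch of the token-substitution idea behind such a replacements table; the expand helper, the FakeImage stand-in and the sample pattern are illustrative only, not the repo's implementation.

# Illustrative token expansion over a replacements table keyed by pattern name.
# The real FilenameGenerator resolves tokens like [seed] and [width] in a similar
# spirit, but with lambdas over the processing object and image; names here are made up.
def expand(pattern, values):
    out = pattern
    for key, fn in values.items():
        out = out.replace(f"[{key}]", str(fn()))
    return out

class FakeImage:
    width, height = 768, 512

values = {
    "seed": lambda: 1234,
    "width": lambda: FakeImage.width,    # taken from the image, not from p
    "height": lambda: FakeImage.height,
}

print(expand("[seed]-[width]x[height]", values))   # 1234-768x512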
def prompt_no_style(self):
if self.p is None or self.prompt is None:
@ -449,7 +450,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
txt_fullfn (`str` or None):
If a text file is saved for this image, this will be its full path. Otherwise None.
"""
namegen = FilenameGenerator(p, seed, prompt)
namegen = FilenameGenerator(p, seed, prompt, image)
if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
@ -509,8 +510,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
if extension.lower() == '.png':
pnginfo_data = PngImagePlugin.PngInfo()
for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v))
if opts.enable_pnginfo:
for k, v in params.pnginfo.items():
pnginfo_data.add_text(k, str(v))
image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo_data)

View File

@ -19,7 +19,7 @@ import modules.scripts
def process_batch(p, input_dir, output_dir, args):
processing.fix_seed(p)
images = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]
images = shared.listfiles(input_dir)
print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
@ -55,6 +55,7 @@ def process_batch(p, input_dir, output_dir, args):
filename = f"{left}-{n}{right}"
if not save_normally:
os.makedirs(output_dir, exist_ok=True)
processed_image.save(os.path.join(output_dir, filename))
@ -137,6 +138,8 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
if processed is None:
processed = process_images(p)
p.close()
shared.total_tqdm.clear()
generation_info_js = processed.js()

View File

@ -56,9 +56,9 @@ class InterrogateModels:
import clip
if self.running_on_cpu:
model, preprocess = clip.load(clip_model_name, device="cpu")
model, preprocess = clip.load(clip_model_name, device="cpu", download_root=shared.cmd_opts.clip_models_path)
else:
model, preprocess = clip.load(clip_model_name)
model, preprocess = clip.load(clip_model_name, download_root=shared.cmd_opts.clip_models_path)
model.eval()
model = model.to(devices.device_interrogate)

View File

@ -38,13 +38,18 @@ def setup_for_low_vram(sd_model, use_medvram):
# see below for register_forward_pre_hook;
# first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is
# useless here, and we just replace those methods
def first_stage_model_encode_wrap(self, encoder, x):
send_me_to_gpu(self, None)
return encoder(x)
def first_stage_model_decode_wrap(self, decoder, z):
send_me_to_gpu(self, None)
return decoder(z)
first_stage_model = sd_model.first_stage_model
first_stage_model_encode = sd_model.first_stage_model.encode
first_stage_model_decode = sd_model.first_stage_model.decode
def first_stage_model_encode_wrap(x):
send_me_to_gpu(first_stage_model, None)
return first_stage_model_encode(x)
def first_stage_model_decode_wrap(z):
send_me_to_gpu(first_stage_model, None)
return first_stage_model_decode(z)
# remove three big modules, cond, first_stage, and unet from the model and then
# send the model to GPU. Then put modules back. The modules will remain on the CPU.
@ -56,8 +61,8 @@ def setup_for_low_vram(sd_model, use_medvram):
# register hooks for those the first two models
sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
sd_model.first_stage_model.encode = lambda x, en=sd_model.first_stage_model.encode: first_stage_model_encode_wrap(sd_model.first_stage_model, en, x)
sd_model.first_stage_model.decode = lambda z, de=sd_model.first_stage_model.decode: first_stage_model_decode_wrap(sd_model.first_stage_model, de, z)
sd_model.first_stage_model.encode = first_stage_model_encode_wrap
sd_model.first_stage_model.decode = first_stage_model_decode_wrap
parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
if use_medvram:

View File

@ -129,12 +129,83 @@ class StableDiffusionProcessing():
self.all_seeds = None
self.all_subseeds = None
def txt2img_image_conditioning(self, x, width=None, height=None):
if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
# Dummy zero conditioning if we're not using inpainting model.
# Still takes up a bit of memory, but no encoder call.
# Pretty sure we can just make this a 1x1 image since it's not going to be used other than for its batch size.
return torch.zeros(
x.shape[0], 5, 1, 1,
dtype=x.dtype,
device=x.device
)
height = height or self.height
width = width or self.width
# The "masked-image" in this case will just be all zeros since the entire image is masked.
image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning))
# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
image_conditioning = image_conditioning.to(x.dtype)
return image_conditioning
def img2img_image_conditioning(self, source_image, latent_image, image_mask = None):
if self.sampler.conditioning_key not in {'hybrid', 'concat'}:
# Dummy zero conditioning if we're not using inpainting model.
return torch.zeros(
latent_image.shape[0], 5, 1, 1,
dtype=latent_image.dtype,
device=latent_image.device
)
# Handle the different mask inputs
if image_mask is not None:
if torch.is_tensor(image_mask):
conditioning_mask = image_mask
else:
conditioning_mask = np.array(image_mask.convert("L"))
conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
conditioning_mask = torch.from_numpy(conditioning_mask[None, None])
# Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
conditioning_mask = torch.round(conditioning_mask)
else:
conditioning_mask = torch.ones(1, 1, *source_image.shape[-2:])
# Create another latent image, this time with a masked version of the original input.
# Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
conditioning_mask = conditioning_mask.to(source_image.device)
conditioning_image = torch.lerp(
source_image,
source_image * (1.0 - conditioning_mask),
getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
)
# Encode the new masked image using first stage of network.
conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))
# Create the concatenated conditioning tensor to be fed to `c_concat`
conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype)
return image_conditioning
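The torch.lerp above is what the new "Inpainting conditioning mask strength" option (inpainting_mask_weight) feeds: at weight 1.0 the conditioning image is the fully masked source, which matches the previous behaviour, and at 0.0 it is the untouched source. A tiny sketch of that interpolation on dummy tensors; shapes and values are illustrative, not the model's real latents:

import torch

source = torch.ones(1, 3, 4, 4)    # stand-in for the source image
mask = torch.ones(1, 1, 4, 4)      # 1.0 = masked everywhere

for weight in (1.0, 0.5, 0.0):
    cond = torch.lerp(source, source * (1.0 - mask), weight)
    # weight 1.0 -> all zeros (fully masked), 0.0 -> the original source
    print(weight, cond.mean().item())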
def init(self, all_prompts, all_seeds, all_subseeds):
pass
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
raise NotImplementedError()
def close(self):
self.sd_model = None
self.sampler = None
class Processed:
def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None):
@ -329,6 +400,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
"Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name),
"Hypernet strength": (None if shared.loaded_hypernetwork is None or shared.opts.sd_hypernetwork_strength >= 1 else shared.opts.sd_hypernetwork_strength),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
@ -411,7 +483,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
model_hijack.embedding_db.load_textual_inversion_embeddings()
if p.scripts is not None:
p.scripts.run_alwayson_scripts(p)
p.scripts.process(p)
infotexts = []
output_images = []
@ -434,7 +506,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
if (len(prompts) == 0):
if len(prompts) == 0:
break
with devices.autocast():
@ -523,7 +595,13 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
devices.torch_gc()
return Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], all_prompts=p.all_prompts, all_seeds=p.all_seeds, all_subseeds=p.all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts)
res = Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], all_prompts=p.all_prompts, all_seeds=p.all_seeds, all_subseeds=p.all_subseeds, index_of_first_image=index_of_first_image, infotexts=infotexts)
if p.scripts is not None:
p.scripts.postprocess(p, res)
return res
class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
@ -571,37 +649,16 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f
self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f
def create_dummy_mask(self, x, width=None, height=None):
if self.sampler.conditioning_key in {'hybrid', 'concat'}:
height = height or self.height
width = width or self.width
# The "masked-image" in this case will just be all zeros since the entire image is masked.
image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning))
# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
image_conditioning = image_conditioning.to(x.dtype)
else:
# Dummy zero conditioning if we're not using inpainting model.
# Still takes up a bit of memory, but no encoder call.
# Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
image_conditioning = torch.zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
return image_conditioning
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr:
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.create_dummy_mask(x))
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
return samples
x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.create_dummy_mask(x, self.firstphase_width, self.firstphase_height))
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x, self.firstphase_width, self.firstphase_height))
samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2]
@ -634,11 +691,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
image_conditioning = self.txt2img_image_conditioning(x)
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=self.create_dummy_mask(samples))
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=image_conditioning)
return samples
@ -770,33 +829,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
elif self.inpainting_fill == 3:
self.init_latent = self.init_latent * self.mask
if self.sampler.conditioning_key in {'hybrid', 'concat'}:
if self.image_mask is not None:
conditioning_mask = np.array(self.image_mask.convert("L"))
conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
conditioning_mask = torch.from_numpy(conditioning_mask[None, None])
# Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
conditioning_mask = torch.round(conditioning_mask)
else:
conditioning_mask = torch.ones(1, 1, *image.shape[-2:])
# Create another latent image, this time with a masked version of the original input.
conditioning_mask = conditioning_mask.to(image.device)
conditioning_image = image * (1.0 - conditioning_mask)
conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))
# Create the concatenated conditioning tensor to be fed to `c_concat`
conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=self.init_latent.shape[-2:])
conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
self.image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
self.image_conditioning = self.image_conditioning.to(shared.device).type(self.sd_model.dtype)
else:
self.image_conditioning = torch.zeros(
self.init_latent.shape[0], 5, 1, 1,
dtype=self.init_latent.dtype,
device=self.init_latent.device
)
self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, self.image_mask)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):

View File

@ -32,7 +32,7 @@ class RestrictedUnpickler(pickle.Unpickler):
return getattr(collections, name)
if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:
return getattr(torch._utils, name)
if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage']:
if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage', 'DoubleStorage', 'ByteStorage']:
return getattr(torch, name)
if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
return getattr(torch.nn.modules.container, name)
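The added ByteStorage entry extends the same whitelist idea: find_class only hands back globals from an explicit allow-list and refuses everything else. A generic sketch of that pattern; the class name and allow-list below are illustrative, not the repo's full rule set:

import pickle
import collections

class WhitelistUnpickler(pickle.Unpickler):
    # only these (module, name) pairs may be reconstructed from the pickle
    ALLOWED = {("collections", "OrderedDict")}

    def find_class(self, module, name):
        if (module, name) in self.ALLOWED:
            return getattr(collections, name)
        raise pickle.UnpicklingError(f"global '{module}.{name}' is forbidden")

A checkpoint is then loaded through the restricted unpickler instead of a plain pickle.load, so a malicious pickle cannot import arbitrary callables.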

View File

@ -3,6 +3,8 @@ import traceback
from collections import namedtuple
import inspect
from fastapi import FastAPI
from gradio import Blocks
def report_exception(c, job):
print(f"Error executing callback {job} for {c.script}", file=sys.stderr)
@ -24,12 +26,32 @@ class ImageSaveParams:
"""dictionary with parameters for image's PNG info data; infotext will have the key 'parameters'"""
class CFGDenoiserParams:
def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps):
self.x = x
"""Latent image representation in the process of being denoised"""
self.image_cond = image_cond
"""Conditioning image"""
self.sigma = sigma
"""Current sigma noise step value"""
self.sampling_step = sampling_step
"""Current Sampling step number"""
self.total_sampling_steps = total_sampling_steps
"""Total number of sampling steps planned"""
ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"])
callbacks_app_started = []
callbacks_model_loaded = []
callbacks_ui_tabs = []
callbacks_ui_settings = []
callbacks_before_image_saved = []
callbacks_image_saved = []
callbacks_cfg_denoiser = []
def clear_callbacks():
@ -38,6 +60,14 @@ def clear_callbacks():
callbacks_ui_settings.clear()
callbacks_before_image_saved.clear()
callbacks_image_saved.clear()
callbacks_cfg_denoiser.clear()
def app_started_callback(demo: Blocks, app: FastAPI):
for c in callbacks_app_started:
try:
c.callback(demo, app)
except Exception:
report_exception(c, 'app_started_callback')
def model_loaded_callback(sd_model):
@ -69,7 +99,7 @@ def ui_settings_callback():
def before_image_saved_callback(params: ImageSaveParams):
for c in callbacks_image_saved:
for c in callbacks_before_image_saved:
try:
c.callback(params)
except Exception:
@ -84,6 +114,14 @@ def image_saved_callback(params: ImageSaveParams):
report_exception(c, 'image_saved_callback')
def cfg_denoiser_callback(params: CFGDenoiserParams):
for c in callbacks_cfg_denoiser:
try:
c.callback(params)
except Exception:
report_exception(c, 'cfg_denoiser_callback')
def add_callback(callbacks, fun):
stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if len(stack) > 0 else 'unknown file'
@ -91,6 +129,12 @@ def add_callback(callbacks, fun):
callbacks.append(ScriptCallback(filename, fun))
def on_app_started(callback):
"""register a function to be called when the webui started, the gradio `Block` component and
fastapi `FastAPI` object are passed as the arguments"""
add_callback(callbacks_app_started, callback)
def on_model_loaded(callback):
"""register a function to be called when the stable diffusion model is created; the model is
passed as an argument"""
@ -130,3 +174,12 @@ def on_image_saved(callback):
- params: ImageSaveParams - parameters the image was saved with. Changing fields in this object does nothing.
"""
add_callback(callbacks_image_saved, callback)
def on_cfg_denoiser(callback):
"""register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
The callback is called with one argument:
- params: CFGDenoiserParams - parameters to be passed to the inner model and sampling state details.
"""
add_callback(callbacks_cfg_denoiser, callback)
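Taken together, the new hooks let an extension react both to UI startup and to every denoiser call. A minimal sketch of a script registering them, using only the callbacks and CFGDenoiserParams fields documented above; the callback bodies are illustrative:

from modules import script_callbacks

def my_app_started(demo, app):
    # demo is the gradio Blocks instance, app the FastAPI object
    print("webui is up, routes:", len(app.routes))

def my_cfg_denoiser(params):
    # params is a CFGDenoiserParams: x, image_cond, sigma, sampling_step, total_sampling_steps
    if params.sampling_step == 0:
        print("denoising", params.x.shape, "over", params.total_sampling_steps, "steps")

script_callbacks.on_app_started(my_app_started)
script_callbacks.on_cfg_denoiser(my_cfg_denoiser)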

View File

@ -7,7 +7,7 @@ import modules.ui as ui
import gradio as gr
from modules.processing import StableDiffusionProcessing
from modules import shared, paths, script_callbacks
from modules import shared, paths, script_callbacks, extensions
AlwaysVisible = object()
@ -64,7 +64,16 @@ class Script:
def process(self, p, *args):
"""
This function is called before processing begins for AlwaysVisible scripts.
scripts. You can modify the processing object (p) here, inject hooks, etc.
You can modify the processing object (p) here, inject hooks, etc.
args contains all values returned by components from ui()
"""
pass
def postprocess(self, p, processed, *args):
"""
This function is called after processing ends for AlwaysVisible scripts.
args contains all values returned by components from ui()
"""
pass
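With postprocess added, an AlwaysVisible script is now called both before and after generation. A minimal sketch of such a script; the class is illustrative, and the use of p.extra_generation_params is an assumption about the processing object rather than something shown in this diff:

import modules.scripts as scripts

class ExampleAlwaysOn(scripts.Script):
    def title(self):
        return "Example always-on hook"

    def show(self, is_img2img):
        return scripts.AlwaysVisible   # run on every generation, no dropdown entry

    def process(self, p, *args):
        # runs before generation; p can still be modified here
        # (assuming the usual extra_generation_params dict on p)
        p.extra_generation_params["Example hook"] = "on"

    def postprocess(self, p, processed, *args):
        # runs after generation; processed holds the finished images and infotexts
        print(f"generated {len(processed.images)} image(s)")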
@ -98,17 +107,8 @@ def list_scripts(scriptdirname, extension):
for filename in sorted(os.listdir(basedir)):
scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename)))
extdir = os.path.join(paths.script_path, "extensions")
if os.path.exists(extdir):
for dirname in sorted(os.listdir(extdir)):
dirpath = os.path.join(extdir, dirname)
scriptdirpath = os.path.join(dirpath, scriptdirname)
if not os.path.isdir(scriptdirpath):
continue
for filename in sorted(os.listdir(scriptdirpath)):
scripts_list.append(ScriptFile(dirpath, filename, os.path.join(scriptdirpath, filename)))
for ext in extensions.active():
scripts_list += ext.list_files(scriptdirname, extension)
scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
@ -118,11 +118,7 @@ def list_scripts(scriptdirname, extension):
def list_files_with_name(filename):
res = []
dirs = [paths.script_path]
extdir = os.path.join(paths.script_path, "extensions")
if os.path.exists(extdir):
dirs += [os.path.join(extdir, d) for d in sorted(os.listdir(extdir))]
dirs = [paths.script_path] + [ext.path for ext in extensions.active()]
for dirpath in dirs:
if not os.path.isdir(dirpath):
@ -236,7 +232,7 @@ class ScriptRunner:
with gr.Group():
create_script_ui(script, inputs, inputs_alwayson)
dropdown = gr.Dropdown(label="Script", choices=["None"] + self.titles, value="None", type="index")
dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index")
dropdown.save_to_config = True
inputs[0] = dropdown
@ -289,13 +285,22 @@ class ScriptRunner:
return processed
def run_alwayson_scripts(self, p):
def process(self, p):
for script in self.alwayson_scripts:
try:
script_args = p.script_args[script.args_from:script.args_to]
script.process(p, *script_args)
except Exception:
print(f"Error running alwayson script: {script.filename}", file=sys.stderr)
print(f"Error running process: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def postprocess(self, p, processed):
for script in self.alwayson_scripts:
try:
script_args = p.script_args[script.args_from:script.args_to]
script.postprocess(p, processed, *script_args)
except Exception:
print(f"Error running postprocess: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def reload_sources(self, cache):

View File

@ -94,6 +94,10 @@ class StableDiffusionModelHijack:
if type(model_embeddings.token_embedding) == EmbeddingsWithFixes:
model_embeddings.token_embedding = model_embeddings.token_embedding.wrapped
self.layers = None
self.circular_enabled = False
self.clip = None
def apply_circular(self, enable):
if self.circular_enabled == enable:
return

View File

@ -1,8 +1,10 @@
import collections
import os.path
import sys
import gc
from collections import namedtuple
import torch
import re
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
@ -36,7 +38,9 @@ def setup_model():
def checkpoint_tiles():
return sorted([x.title for x in checkpoints_list.values()])
convert = lambda name: int(name) if name.isdigit() else name.lower()
alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
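checkpoint_tiles now sorts titles with a natural (alphanumeric) key, so numbered checkpoints order by numeric value instead of lexicographically. A small standalone illustration of the same key function on made-up titles:

import re

convert = lambda name: int(name) if name.isdigit() else name.lower()
alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]

titles = ["model-10.ckpt", "model-2.ckpt", "Model-1.ckpt"]
print(sorted(titles))                        # ['Model-1.ckpt', 'model-10.ckpt', 'model-2.ckpt']
print(sorted(titles, key=alphanumeric_key))  # ['Model-1.ckpt', 'model-2.ckpt', 'model-10.ckpt']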
def list_models():
@ -170,7 +174,9 @@ def load_model_weights(model, checkpoint_info):
print(f"Global Step: {pl_sd['global_step']}")
sd = get_state_dict_from_checkpoint(pl_sd)
missing, extra = model.load_state_dict(sd, strict=False)
del pl_sd
model.load_state_dict(sd, strict=False)
del sd
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
@ -194,9 +200,10 @@ def load_model_weights(model, checkpoint_info):
model.first_stage_model.to(devices.dtype_vae)
checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
checkpoints_loaded.popitem(last=False) # LRU
if shared.opts.sd_checkpoint_cache > 0:
checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
checkpoints_loaded.popitem(last=False) # LRU
else:
print(f"Loading weights [{sd_model_hash}] from cache")
checkpoints_loaded.move_to_end(checkpoint_info)
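checkpoints_loaded acts as a small LRU cache: entries are only added when sd_checkpoint_cache is above zero, the oldest entry is evicted with popitem(last=False) once the limit is exceeded, and a cache hit is refreshed with move_to_end. A compact sketch of that eviction behaviour on plain strings; the cache size and keys are illustrative:

from collections import OrderedDict

cache, limit = OrderedDict(), 2

def put(key, value):
    cache[key] = value
    while len(cache) > limit:
        cache.popitem(last=False)   # evict the least recently used entry

put("ckpt-a", "weights-a")
put("ckpt-b", "weights-b")
cache.move_to_end("ckpt-a")         # "cache hit" keeps a alive
put("ckpt-c", "weights-c")          # evicts ckpt-b, the oldest untouched entry
print(list(cache))                  # ['ckpt-a', 'ckpt-c']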
@ -214,6 +221,12 @@ def load_model(checkpoint_info=None):
if checkpoint_info.config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_info.config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_info.config)
if should_hijack_inpainting(checkpoint_info):
@ -227,6 +240,7 @@ def load_model(checkpoint_info=None):
checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml"))
do_inpainting_hijack()
sd_model = instantiate_from_config(sd_config.model)
load_model_weights(sd_model, checkpoint_info)
@ -246,14 +260,18 @@ def load_model(checkpoint_info=None):
return sd_model
def reload_model_weights(sd_model, info=None):
def reload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
if not sd_model:
sd_model = shared.sd_model
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
return shared.sd_model

View File

@ -1,5 +1,6 @@
from collections import namedtuple
import numpy as np
from math import floor
import torch
import tqdm
from PIL import Image
@ -11,6 +12,7 @@ from modules import prompt_parser, devices, processing, images
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
@ -205,17 +207,22 @@ class VanillaStableDiffusionSampler:
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def adjust_steps_if_invalid(self, p, num_steps):
if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
valid_step = 999 / (1000 // num_steps)
if valid_step == floor(valid_step):
return int(valid_step) + 1
return num_steps
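adjust_steps_if_invalid replaces the old try/except retry: for DDIM with uniform discretization (and for PLMS), certain step counts such as 9 make the sampler's schedule setup fail, so those counts are bumped by one up front. A quick standalone check of the arithmetic, with values chosen only for illustration:

from math import floor

def adjusted(num_steps):
    valid_step = 999 / (1000 // num_steps)
    return int(valid_step) + 1 if valid_step == floor(valid_step) else num_steps

print(adjusted(9))    # 1000 // 9 = 111, 999 / 111 = 9.0   -> bumped to 10
print(adjusted(20))   # 1000 // 20 = 50, 999 / 50 = 19.98  -> left at 20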
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
steps = self.adjust_steps_if_invalid(p, steps)
self.initialize(p)
# existing code fails with certain step counts, like 9
try:
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
except Exception:
self.sampler.make_schedule(ddim_num_steps=steps+1, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
@ -239,18 +246,14 @@ class VanillaStableDiffusionSampler:
self.last_latent = x
self.step = 0
steps = steps or p.steps
steps = self.adjust_steps_if_invalid(p, steps or p.steps)
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
# existing code fails with certain step counts, like 9
try:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
except Exception:
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps+1, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
return samples_ddim
@ -278,6 +281,12 @@ class CFGDenoiser(torch.nn.Module):
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
cfg_denoiser_callback(denoiser_params)
x_in = denoiser_params.x
image_cond_in = denoiser_params.image_cond
sigma_in = denoiser_params.sigma
if tensor.shape[1] == uncond.shape[1]:
cond_in = torch.cat([tensor, uncond])

View File

@ -40,7 +40,7 @@ parser.add_argument("--lowram", action='store_true', help="load stable diffusion
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
@ -51,6 +51,7 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
@ -82,6 +83,7 @@ parser.add_argument("--api", action='store_true', help="use api=True to launch t
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the api instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
cmd_opts = parser.parse_args()
restricted_opts = {
@ -96,6 +98,8 @@ restricted_opts = {
"outdir_save",
}
cmd_opts.disable_extension_access = cmd_opts.share or cmd_opts.listen
devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_swinir, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
(devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'swinir', 'esrgan', 'scunet', 'codeformer'])
@ -131,6 +135,7 @@ class State:
current_image = None
current_image_sampling_step = 0
textinfo = None
need_restart = False
def skip(self):
self.skipped = True
@ -143,9 +148,38 @@ class State:
self.sampling_step = 0
self.current_image_sampling_step = 0
def get_job_timestamp(self):
return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp?
def dict(self):
obj = {
"skipped": self.skipped,
"interrupted": self.skipped,
"job": self.job,
"job_count": self.job_count,
"job_no": self.job_no,
"sampling_step": self.sampling_step,
"sampling_steps": self.sampling_steps,
}
return obj
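The new dict() snapshot, together with begin() and end(), gives callers a way to report job progress without touching the UI. A hedged sketch of how a status helper might use it; the function name and the derived progress field are illustrative, not part of the repo's API module:

from modules import shared

def progress_status():
    # what a /progress-style endpoint could return between begin() and end()
    status = shared.state.dict()
    done, total = status["sampling_step"], status["sampling_steps"]
    status["progress"] = done / total if total else 0.0
    return status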
def begin(self):
self.sampling_step = 0
self.job_count = -1
self.job_no = 0
self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
self.current_latent = None
self.current_image = None
self.current_image_sampling_step = 0
self.skipped = False
self.interrupted = False
self.textinfo = None
devices.torch_gc()
def end(self):
self.job = ""
self.job_count = 0
devices.torch_gc()
state = State()
@ -255,11 +289,12 @@ options_templates.update(options_section(('system', "System"), {
}))
options_templates.update(options_section(('training', "Training"), {
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM."),
"unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
"dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
"dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
"training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
"training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"),
"training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
@ -267,6 +302,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
"sd_hypernetwork_strength": OptionInfo(1.0, "Hypernetwork strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.001}),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
@ -303,6 +339,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
"disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
@ -322,6 +359,12 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
}))
options_templates.update(options_section((None, "Hidden options"), {
"disabled_extensions": OptionInfo([], "Disable those extensions"),
}))
options_templates.update()
class Options:
data = None
@ -333,8 +376,9 @@ class Options:
def __setattr__(self, key, value):
if self.data is not None:
if key in self.data:
if key in self.data or key in self.data_labels:
self.data[key] = value
return
return super(Options, self).__setattr__(key, value)
@ -449,3 +493,8 @@ total_tqdm = TotalTQDM()
mem_mon = modules.memmon.MemUsageMonitor("MemMon", device, opts)
mem_mon.start()
def listfiles(dirname):
filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname)) if not x.startswith(".")]
return [file for file in filenames if os.path.isfile(file)]

View File

@ -42,6 +42,8 @@ class PersonalizedBase(Dataset):
self.lines = lines
assert data_root, 'dataset directory not specified'
assert os.path.isdir(data_root), "Dataset directory doesn't exist"
assert os.listdir(data_root), "Dataset directory is empty"
cond_model = shared.sd_model.cond_stage_model
@ -86,12 +88,12 @@ class PersonalizedBase(Dataset):
assert len(self.dataset) > 0, "No images have been found in the dataset."
self.length = len(self.dataset) * repeats // batch_size
self.initial_indexes = np.arange(len(self.dataset))
self.dataset_length = len(self.dataset)
self.indexes = None
self.shuffle()
def shuffle(self):
self.indexes = self.initial_indexes[torch.randperm(self.initial_indexes.shape[0]).numpy()]
self.indexes = np.random.permutation(self.dataset_length)
def create_text(self, filename_text):
text = random.choice(self.lines)

View File

@ -4,30 +4,37 @@ import tqdm
class LearnScheduleIterator:
def __init__(self, learn_rate, max_steps, cur_step=0):
"""
specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, 1e-5:10000 until 10000
specify learn_rate as "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000
"""
pairs = learn_rate.split(',')
self.rates = []
self.it = 0
self.maxit = 0
for i, pair in enumerate(pairs):
tmp = pair.split(':')
if len(tmp) == 2:
step = int(tmp[1])
if step > cur_step:
self.rates.append((float(tmp[0]), min(step, max_steps)))
self.maxit += 1
if step > max_steps:
try:
for i, pair in enumerate(pairs):
if not pair.strip():
continue
tmp = pair.split(':')
if len(tmp) == 2:
step = int(tmp[1])
if step > cur_step:
self.rates.append((float(tmp[0]), min(step, max_steps)))
self.maxit += 1
if step > max_steps:
return
elif step == -1:
self.rates.append((float(tmp[0]), max_steps))
self.maxit += 1
return
elif step == -1:
else:
self.rates.append((float(tmp[0]), max_steps))
self.maxit += 1
return
else:
self.rates.append((float(tmp[0]), max_steps))
self.maxit += 1
return
assert self.rates
except (ValueError, AssertionError):
raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.')
def __iter__(self):
return self
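The schedule string is parsed into (learn_rate, end_step) pairs, with -1 or a missing step meaning "until max_steps". A short check of the parsing for the example from the docstring; the import path is assumed to be modules.textual_inversion.learn_schedule, matching where this class lives:

from modules.textual_inversion.learn_schedule import LearnScheduleIterator

sched = LearnScheduleIterator("0.001:100, 0.00001:1000, 1e-5:10000", max_steps=10000)
print(sched.rates)   # [(0.001, 100), (1e-05, 1000), (1e-05, 10000)]

# a bare number (no ':step') runs at that rate for all max_steps
print(LearnScheduleIterator("0.005", max_steps=2000).rates)   # [(0.005, 2000)]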
@ -52,7 +59,7 @@ class LearnRateScheduler:
self.finished = False
def apply(self, optimizer, step_number):
if step_number <= self.end_step:
if step_number < self.end_step:
return
try:

View File

@ -119,7 +119,7 @@ class EmbeddingDatabase:
vec = emb.detach().to(devices.device, dtype=torch.float32)
embedding = Embedding(vec, name)
embedding.step = data.get('step', None)
embedding.sd_checkpoint = data.get('hash', None)
embedding.sd_checkpoint = data.get('sd_checkpoint', None)
embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
self.register_embedding(embedding, shared.sd_model)
@ -184,9 +184,8 @@ def write_loss(log_directory, filename, step, epoch_len, values):
if shared.opts.training_write_csv_every == 0:
return
if step % shared.opts.training_write_csv_every != 0:
if (step + 1) % shared.opts.training_write_csv_every != 0:
return
write_csv_header = False if os.path.exists(os.path.join(log_directory, filename)) else True
with open(os.path.join(log_directory, filename), "a+", newline='') as fout:
@ -196,18 +195,39 @@ def write_loss(log_directory, filename, step, epoch_len, values):
csv_writer.writeheader()
epoch = step // epoch_len
epoch_step = step - epoch * epoch_len
epoch_step = step % epoch_len
csv_writer.writerow({
"step": step + 1,
"epoch": epoch + 1,
"epoch": epoch,
"epoch_step": epoch_step + 1,
**values,
})
def validate_train_inputs(model_name, learn_rate, batch_size, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"):
assert model_name, f"{name} not selected"
assert learn_rate, "Learning rate is empty or 0"
assert isinstance(batch_size, int), "Batch size must be integer"
assert batch_size > 0, "Batch size must be positive"
assert data_root, "Dataset directory is empty"
assert os.path.isdir(data_root), "Dataset directory doesn't exist"
assert os.listdir(data_root), "Dataset directory is empty"
assert template_file, "Prompt template file is empty"
assert os.path.isfile(template_file), "Prompt template file doesn't exist"
assert steps, "Max steps is empty or 0"
assert isinstance(steps, int), "Max steps must be integer"
assert steps > 0 , "Max steps must be positive"
assert isinstance(save_model_every, int), "Save {name} must be integer"
assert save_model_every >= 0 , "Save {name} must be positive or 0"
assert isinstance(create_image_every, int), "Create image must be integer"
assert create_image_every >= 0 , "Create image must be positive or 0"
if save_model_every or create_image_every:
assert log_directory, "Log directory is empty"
def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
assert embedding_name, 'embedding not selected'
save_embedding_every = save_embedding_every or 0
create_image_every = create_image_every or 0
validate_train_inputs(embedding_name, learn_rate, batch_size, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding")
shared.state.textinfo = "Initializing textual inversion training..."
shared.state.job_count = steps
@ -215,6 +235,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
unload = shared.opts.unload_models_when_training
if save_embedding_every > 0:
embedding_dir = os.path.join(log_directory, "embeddings")
@ -236,14 +257,27 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
cond_model = shared.sd_model.cond_stage_model
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size)
hijack = sd_hijack.model_hijack
embedding = hijack.embedding_db.word_embeddings[embedding_name]
checkpoint = sd_models.select_checkpoint()
ititial_step = embedding.step or 0
if ititial_step >= steps:
shared.state.textinfo = f"Model has already been trained beyond specified max steps"
return embedding, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
# dataset loading may take a while, so input validations and early returns should be done before this
shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
with torch.autocast("cuda"):
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file, batch_size=batch_size)
if unload:
shared.sd_model.first_stage_model.to(devices.cpu)
embedding.vec.requires_grad = True
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
losses = torch.zeros((32,))
@ -252,13 +286,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
forced_filename = "<none>"
embedding_yet_to_be_embedded = False
ititial_step = embedding.step or 0
if ititial_step > steps:
return embedding, filename
scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate)
pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step)
for i, entries in pbar:
embedding.step = i + ititial_step
@ -282,17 +309,18 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
loss.backward()
optimizer.step()
steps_done = embedding.step + 1
epoch_num = embedding.step // len(ds)
epoch_step = embedding.step - (epoch_num * len(ds)) + 1
epoch_step = embedding.step % len(ds)
pbar.set_description(f"[Epoch {epoch_num}: {epoch_step}/{len(ds)}]loss: {losses.mean():.7f}")
pbar.set_description(f"[Epoch {epoch_num}: {epoch_step+1}/{len(ds)}]loss: {losses.mean():.7f}")
if embedding.step > 0 and embedding_dir is not None and embedding.step % save_embedding_every == 0:
if embedding_dir is not None and steps_done % save_embedding_every == 0:
# Before saving, change name to match current checkpoint.
embedding.name = f'{embedding_name}-{embedding.step}'
last_saved_file = os.path.join(embedding_dir, f'{embedding.name}.pt')
embedding.save(last_saved_file)
embedding_name_every = f'{embedding_name}-{steps_done}'
last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt')
save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True)
embedding_yet_to_be_embedded = True
write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), {
@ -300,9 +328,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
"learn_rate": scheduler.learn_rate
})
if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0:
forced_filename = f'{embedding_name}-{embedding.step}'
if images_dir is not None and steps_done % create_image_every == 0:
forced_filename = f'{embedding_name}-{steps_done}'
last_saved_image = os.path.join(images_dir, forced_filename)
shared.sd_model.first_stage_model.to(devices.device)
p = processing.StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model,
do_not_save_grid=True,
@ -330,11 +361,14 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
processed = processing.process_images(p)
image = processed.images[0]
if unload:
shared.sd_model.first_stage_model.to(devices.cpu)
shared.state.current_image = image
if save_image_with_stored_embedding and os.path.exists(last_saved_file) and embedding_yet_to_be_embedded:
last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{embedding.step}.png')
last_saved_image_chunks = os.path.join(images_embeds_dir, f'{embedding_name}-{steps_done}.png')
info = PngImagePlugin.PngInfo()
data = torch.load(last_saved_file)
@ -350,7 +384,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc
checkpoint = sd_models.select_checkpoint()
footer_left = checkpoint.model_name
footer_mid = '[{}]'.format(checkpoint.hash)
footer_right = '{}v {}s'.format(vectorSize, embedding.step)
footer_right = '{}v {}s'.format(vectorSize, steps_done)
captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)
captioned_image = insert_image_data_embed(captioned_image, data)
@ -373,14 +407,27 @@ Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
checkpoint = sd_models.select_checkpoint()
embedding.sd_checkpoint = checkpoint.hash
embedding.sd_checkpoint_name = checkpoint.model_name
embedding.cached_checksum = None
# Before saving for the last time, change name back to base name (as opposed to the save_embedding_every step-suffixed naming convention).
embedding.name = embedding_name
filename = os.path.join(shared.cmd_opts.embedding_dir, f'{embedding.name}.pt')
embedding.save(filename)
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True)
shared.sd_model.first_stage_model.to(devices.device)
return embedding, filename
def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True):
old_embedding_name = embedding.name
old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None
old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None
old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None
try:
embedding.sd_checkpoint = checkpoint.hash
embedding.sd_checkpoint_name = checkpoint.model_name
if remove_cached_checksum:
embedding.cached_checksum = None
embedding.name = embedding_name
embedding.save(filename)
except:
embedding.sd_checkpoint = old_sd_checkpoint
embedding.sd_checkpoint_name = old_sd_checkpoint_name
embedding.name = old_embedding_name
embedding.cached_checksum = old_cached_checksum
raise

View File

@ -25,8 +25,10 @@ def train_embedding(*args):
assert not shared.cmd_opts.lowvram, 'Training models with lowvram not possible'
apply_optimizations = shared.opts.training_xattention_optimizations
try:
sd_hijack.undo_optimizations()
if not apply_optimizations:
sd_hijack.undo_optimizations()
embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args)
@ -38,5 +40,6 @@ Embedding saved to {html.escape(filename)}
except Exception:
raise
finally:
sd_hijack.apply_optimizations()
if not apply_optimizations:
sd_hijack.apply_optimizations()

View File

@ -47,6 +47,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
if processed is None:
processed = process_images(p)
p.close()
shared.total_tqdm.clear()
generation_info_js = processed.js()

View File

@ -1,6 +1,4 @@
import base64
import html
import io
import json
import math
import mimetypes
@ -18,15 +16,10 @@ import gradio as gr
import gradio.routes
import gradio.utils
import numpy as np
import piexif
import torch
from PIL import Image, PngImagePlugin
import gradio as gr
import gradio.utils
import gradio.routes
from modules import sd_hijack, sd_models, localization, script_callbacks
from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions
from modules.paths import script_path
from modules.shared import opts, cmd_opts, restricted_opts
@ -35,7 +28,7 @@ if cmd_opts.deepdanbooru:
from modules.deepbooru import get_deepbooru_tags
import modules.codeformer_model
import modules.generation_parameters_copypaste
import modules.generation_parameters_copypaste as parameters_copypaste
import modules.gfpgan_model
import modules.hypernetworks.ui
import modules.ldsr_model
@ -49,13 +42,11 @@ from modules.sd_hijack import model_hijack
from modules.sd_samplers import samplers, samplers_for_img2img
import modules.textual_inversion.ui
import modules.hypernetworks.ui
from modules.generation_parameters_copypaste import image_from_url_text
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
txt2img_paste_fields = []
img2img_paste_fields = []
if not cmd_opts.share and not cmd_opts.listen:
# fix gradio phoning home
@ -98,37 +89,11 @@ def plaintext_to_html(text):
text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
return text
def image_from_url_text(filedata):
if type(filedata) == dict and filedata["is_file"]:
filename = filedata["name"]
tempdir = os.path.normpath(tempfile.gettempdir())
normfn = os.path.normpath(filename)
assert normfn.startswith(tempdir), 'trying to open image file not in temporary directory'
return Image.open(filename)
if type(filedata) == list:
if len(filedata) == 0:
return None
filedata = filedata[0]
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
filedata = base64.decodebytes(filedata.encode('utf-8'))
image = Image.open(io.BytesIO(filedata))
return image
def send_gradio_gallery_to_image(x):
if len(x) == 0:
return None
return image_from_url_text(x[0])
def save_files(js_data, images, do_make_zip, index):
import csv
filenames = []
@ -192,7 +157,6 @@ def save_files(js_data, images, do_make_zip, index):
return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def save_pil_to_file(pil_image, dir=None):
use_metadata = False
metadata = PngImagePlugin.PngInfo()
@ -626,10 +590,90 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele
return refresh_button
def create_output_panel(tabname, outdir):
def open_folder(f):
if not os.path.exists(f):
print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.')
return
elif not os.path.isdir(f):
print(f"""
WARNING
An open_folder request was made with an argument that is not a folder.
This could be an error or a malicious attempt to run code on your computer.
Requested path was: {f}
""", file=sys.stderr)
return
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
sp.Popen(["open", path])
else:
sp.Popen(["xdg-open", path])
with gr.Column(variant='panel'):
with gr.Group():
result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(grid=4)
generation_info = None
with gr.Column():
with gr.Row():
if tabname != "extras":
save = gr.Button('Save', elem_id=f'save_{tabname}')
buttons = parameters_copypaste.create_buttons(["img2img", "inpaint", "extras"])
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_folder_button = gr.Button(folder_symbol, elem_id=button_id)
open_folder_button.click(
fn=lambda: open_folder(opts.outdir_samples or outdir),
inputs=[],
outputs=[],
)
if tabname != "extras":
with gr.Row():
do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
with gr.Row():
download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
result_gallery,
do_make_zip,
html_info,
],
outputs=[
download_files,
html_info,
html_info,
html_info,
]
)
else:
html_info_x = gr.HTML()
html_info = gr.HTML()
parameters_copypaste.bind_buttons(buttons, result_gallery, "txt2img" if tabname == "txt2img" else None)
return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info
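With this refactor, every generation tab builds its results area through the shared helper instead of wiring its own gallery, save button and zip/download widgets. A minimal sketch of the call pattern, taken from the three call sites added later in this diff:
    # how the tabs consume create_output_panel elsewhere in this commit
    txt2img_gallery, generation_info, html_info = create_output_panel("txt2img", opts.outdir_txt2img_samples)
    img2img_gallery, generation_info, html_info = create_output_panel("img2img", opts.outdir_img2img_samples)
    # for "extras" the middle return value is a plain HTML component instead of the generation-info textbox
    result_images, html_info_x, html_info = create_output_panel("extras", opts.outdir_extras_samples)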
def create_ui(wrap_gradio_gpu_call):
import modules.img2img
import modules.txt2img
reload_javascript()
parameters_copypaste.reset()
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False)
@ -675,30 +719,8 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Group():
custom_inputs = modules.scripts.scripts_txt2img.setup_ui(is_img2img=False)
with gr.Column(variant='panel'):
with gr.Group():
txt2img_preview = gr.Image(elem_id='txt2img_preview', visible=False)
txt2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='txt2img_gallery').style(grid=4)
with gr.Column():
with gr.Row():
save = gr.Button('Save')
send_to_img2img = gr.Button('Send to img2img')
send_to_inpaint = gr.Button('Send to inpaint')
send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Row():
do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
with gr.Row():
download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
txt2img_gallery, generation_info, html_info = create_output_panel("txt2img", opts.outdir_txt2img_samples)
parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
@ -756,23 +778,6 @@ def create_ui(wrap_gradio_gpu_call):
outputs=[hr_options],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
do_make_zip,
html_info,
],
outputs=[
download_files,
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_txt2img_tokens",
@ -784,7 +789,6 @@ def create_ui(wrap_gradio_gpu_call):
]
)
global txt2img_paste_fields
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
@ -807,6 +811,7 @@ def create_ui(wrap_gradio_gpu_call):
(firstphase_height, "First pass size-2"),
*modules.scripts.scripts_txt2img.infotext_fields
]
parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields)
txt2img_preview_params = [
txt2img_prompt,
@ -893,30 +898,8 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Group():
custom_inputs = modules.scripts.scripts_img2img.setup_ui(is_img2img=True)
with gr.Column(variant='panel'):
with gr.Group():
img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
img2img_gallery = gr.Gallery(label='Output', show_label=False, elem_id='img2img_gallery').style(grid=4)
with gr.Column():
with gr.Row():
save = gr.Button('Save')
img2img_send_to_img2img = gr.Button('Send to img2img')
img2img_send_to_inpaint = gr.Button('Send to inpaint')
img2img_send_to_extras = gr.Button('Send to extras')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
with gr.Row():
do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
with gr.Row():
download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
img2img_gallery, generation_info, html_info = create_output_panel("img2img", opts.outdir_img2img_samples)
parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt)
connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
@ -1003,25 +986,9 @@ def create_ui(wrap_gradio_gpu_call):
fn=interrogate_deepbooru,
inputs=[init_img],
outputs=[img2img_prompt],
)
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
do_make_zip,
html_info,
],
outputs=[
download_files,
html_info,
html_info,
html_info,
]
)
roll.click(
fn=roll_artist,
_js="update_img2img_tokens",
@ -1055,7 +1022,8 @@ def create_ui(wrap_gradio_gpu_call):
outputs=[prompt, negative_prompt, style1, style2],
)
global img2img_paste_fields
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
img2img_paste_fields = [
(img2img_prompt, "Prompt"),
(img2img_negative_prompt, "Negative prompt"),
@ -1074,7 +1042,8 @@ def create_ui(wrap_gradio_gpu_call):
(denoising_strength, "Denoising strength"),
*modules.scripts.scripts_img2img.infotext_fields
]
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields)
parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields)
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
@ -1087,12 +1056,8 @@ def create_ui(wrap_gradio_gpu_call):
image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file")
with gr.TabItem('Batch from Directory'):
extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs,
placeholder="A directory on the same machine where the server is running."
)
extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs,
placeholder="Leave blank to save images to the default path."
)
extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.")
extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.")
show_extras_results = gr.Checkbox(label='Show result images', value=True)
with gr.Tabs(elem_id="extras_resize_mode"):
@ -1119,17 +1084,12 @@ def create_ui(wrap_gradio_gpu_call):
codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer)
codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer)
with gr.Group():
upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False)
submit = gr.Button('Generate', elem_id="extras_generate", variant='primary')
with gr.Column(variant='panel'):
result_images = gr.Gallery(label="Result", show_label=False)
html_info_x = gr.HTML()
html_info = gr.HTML()
extras_send_to_img2img = gr.Button('Send to img2img')
extras_send_to_inpaint = gr.Button('Send to inpaint')
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else ''
open_extras_folder = gr.Button('Open output directory', elem_id=button_id)
result_images, html_info_x, html_info = create_output_panel("extras", opts.outdir_extras_samples)
submit.click(
fn=wrap_gradio_gpu_call(modules.extras.run_extras),
@ -1152,6 +1112,7 @@ def create_ui(wrap_gradio_gpu_call):
extras_upscaler_1,
extras_upscaler_2,
extras_upscaler_2_visibility,
upscale_before_face_fix,
],
outputs=[
result_images,
@ -1159,19 +1120,11 @@ def create_ui(wrap_gradio_gpu_call):
html_info,
]
)
parameters_copypaste.add_paste_fields("extras", extras_image, None)
extras_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[result_images],
outputs=[init_img],
)
extras_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[result_images],
outputs=[init_img_with_mask],
extras_image.change(
fn=modules.extras.clear_cache,
inputs=[], outputs=[]
)
with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
@ -1183,10 +1136,9 @@ def create_ui(wrap_gradio_gpu_call):
html = gr.HTML()
generation_info = gr.Textbox(visible=False)
html2 = gr.HTML()
with gr.Row():
pnginfo_send_to_txt2img = gr.Button('Send to txt2img')
pnginfo_send_to_img2img = gr.Button('Send to img2img')
buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
parameters_copypaste.bind_buttons(buttons, image, generation_info)
image.change(
fn=wrap_gradio_call(modules.extras.run_pnginfo),
@ -1238,7 +1190,7 @@ def create_ui(wrap_gradio_gpu_call):
new_hypernetwork_name = gr.Textbox(label="Name")
new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "320", "640", "1280"])
new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'")
new_hypernetwork_activation_func = gr.Dropdown(value="relu", label="Select activation function of hypernetwork", choices=modules.hypernetworks.ui.keys)
new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork", choices=modules.hypernetworks.ui.keys)
new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"])
new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization")
new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout")
@ -1491,28 +1443,6 @@ def create_ui(wrap_gradio_gpu_call):
script_callbacks.ui_settings_callback()
opts.reorder()
def open_folder(f):
if not os.path.exists(f):
print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.')
return
elif not os.path.isdir(f):
print(f"""
WARNING
An open_folder request was made with an argument that is not a folder.
This could be an error or a malicious attempt to run code on your computer.
Requested path was: {f}
""", file=sys.stderr)
return
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
sp.Popen(["open", path])
else:
sp.Popen(["xdg-open", path])
def run_settings(*args):
changed = 0
@ -1584,8 +1514,9 @@ Requested path was: {f}
column = None
with gr.Row(elem_id="settings").style(equal_height=False):
for i, (k, item) in enumerate(opts.data_labels.items()):
section_must_be_skipped = item.section[0] is None
if previous_section != item.section:
if previous_section != item.section and not section_must_be_skipped:
if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
if column is not None:
column.__exit__()
@ -1604,6 +1535,8 @@ Requested path was: {f}
if k in quicksettings_names and not shared.cmd_opts.freeze_settings:
quicksettings_list.append((i, k, item))
components.append(dummy_component)
elif section_must_be_skipped:
components.append(dummy_component)
else:
component = create_setting_component(k)
component_dict[k] = component
@ -1645,9 +1578,10 @@ Requested path was: {f}
def request_restart():
shared.state.interrupt()
settings_interface.gradio_ref.do_restart = True
shared.state.need_restart = True
restart_gradio.click(
fn=request_restart,
inputs=[],
outputs=[],
@ -1666,10 +1600,6 @@ Requested path was: {f}
(train_interface, "Train", "ti"),
]
interfaces += script_callbacks.ui_tabs_callback()
interfaces += [(settings_interface, "Settings", "settings")]
css = ""
for cssfile in modules.scripts.list_files_with_name("style.css"):
@ -1686,13 +1616,20 @@ Requested path was: {f}
if not cmd_opts.no_progressbar_hiding:
css += css_hide_progressbar
interfaces += script_callbacks.ui_tabs_callback()
interfaces += [(settings_interface, "Settings", "settings")]
extensions_interface = ui_extensions.create_ui()
interfaces += [(extensions_interface, "Extensions", "extensions")]
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings"):
for i, k, item in quicksettings_list:
component = create_setting_component(k, is_quicksettings=True)
component_dict[k] = component
settings_interface.gradio_ref = demo
parameters_copypaste.integrate_settings_paste_fields(component_dict)
parameters_copypaste.run_bind()
with gr.Tabs(elem_id="tabs") as tabs:
for interface, label, ifid in interfaces:
@ -1747,85 +1684,6 @@ Requested path was: {f}
component_dict['sd_model_checkpoint'],
]
)
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration', 'Seed', 'Size-1', 'Size-2']
txt2img_fields = [field for field,name in txt2img_paste_fields if name in paste_field_names]
img2img_fields = [field for field,name in img2img_paste_fields if name in paste_field_names]
send_to_img2img.click(
fn=lambda img, *args: (image_from_url_text(img),*args),
_js="(gallery, ...args) => [extract_image_from_gallery_img2img(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img] + img2img_fields,
)
send_to_inpaint.click(
fn=lambda x, *args: (image_from_url_text(x), *args),
_js="(gallery, ...args) => [extract_image_from_gallery_inpaint(gallery), ...args]",
inputs=[txt2img_gallery] + txt2img_fields,
outputs=[init_img_with_mask] + img2img_fields,
)
img2img_send_to_img2img.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_img2img",
inputs=[img2img_gallery],
outputs=[init_img],
)
img2img_send_to_inpaint.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_inpaint",
inputs=[img2img_gallery],
outputs=[init_img_with_mask],
)
send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[txt2img_gallery],
outputs=[extras_image],
)
open_txt2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_txt2img_samples),
inputs=[],
outputs=[],
)
open_img2img_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_img2img_samples),
inputs=[],
outputs=[],
)
open_extras_folder.click(
fn=lambda: open_folder(opts.outdir_samples or opts.outdir_extras_samples),
inputs=[],
outputs=[],
)
img2img_send_to_extras.click(
fn=lambda x: image_from_url_text(x),
_js="extract_image_from_gallery_extras",
inputs=[img2img_gallery],
outputs=[extras_image],
)
settings_map = {
'sd_hypernetwork': 'Hypernet',
'CLIP_stop_at_last_layers': 'Clip skip',
'sd_model_checkpoint': 'Model hash',
}
settings_paste_fields = [
(component_dict[k], lambda d, k=k, v=v: apply_setting(k, d.get(v, None)))
for k, v in settings_map.items()
]
modules.generation_parameters_copypaste.connect_paste(txt2img_paste, txt2img_paste_fields + settings_paste_fields, txt2img_prompt)
modules.generation_parameters_copypaste.connect_paste(img2img_paste, img2img_paste_fields + settings_paste_fields, img2img_prompt)
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_txt2img, txt2img_paste_fields + settings_paste_fields, generation_info, 'switch_to_txt2img')
modules.generation_parameters_copypaste.connect_paste(pnginfo_send_to_img2img, img2img_paste_fields + settings_paste_fields, generation_info, 'switch_to_img2img_img2img')
ui_config_file = cmd_opts.ui_config_file
ui_settings = {}
@ -1845,7 +1703,7 @@ Requested path was: {f}
def apply_field(obj, field, condition=None, init_field=None):
key = path + "/" + field
if getattr(obj,'custom_script_source',None) is not None:
if getattr(obj, 'custom_script_source', None) is not None:
key = 'customscript/' + obj.custom_script_source + '/' + key
if getattr(obj, 'do_not_save_to_config', False):
@ -1926,4 +1784,3 @@ def load_javascript(raw_response):
reload_javascript = partial(load_javascript, gradio.routes.templates.TemplateResponse)
reload_javascript()

268
modules/ui_extensions.py Normal file
View File

@ -0,0 +1,268 @@
import json
import os.path
import shutil
import sys
import time
import traceback
import git
import gradio as gr
import html
from modules import extensions, shared, paths
available_extensions = {"extensions": []}
def check_access():
assert not shared.cmd_opts.disable_extension_access, "extension access disabled because of command line flags"
def apply_and_restart(disable_list, update_list):
check_access()
disabled = json.loads(disable_list)
assert type(disabled) == list, f"wrong disable_list data for apply_and_restart: {disable_list}"
update = json.loads(update_list)
assert type(update) == list, f"wrong update_list data for apply_and_restart: {update_list}"
update = set(update)
for ext in extensions.extensions:
if ext.name not in update:
continue
try:
ext.pull()
except Exception:
print(f"Error pulling updates for {ext.name}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
shared.opts.disabled_extensions = disabled
shared.opts.save(shared.config_filename)
shared.state.interrupt()
shared.state.need_restart = True
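apply_and_restart receives both of its arguments as JSON-encoded strings; the hidden text components on the Installed tab are filled in by the page script (the extensions_apply _js hook shown below) before the click reaches Python. A hypothetical direct call, showing the expected shape:
    # hypothetical values; entries are extension directory names under extensions/
    apply_and_restart(
        disable_list='["some-disabled-extension"]',      # written to opts.disabled_extensions
        update_list='["some-extension-to-update"]',      # these repositories are pulled before the restart
    )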
def check_updates():
check_access()
for ext in extensions.extensions:
if ext.remote is None:
continue
try:
ext.check_updates()
except Exception:
print(f"Error checking updates for {ext.name}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return extension_table()
def extension_table():
code = f"""<!-- {time.time()} -->
<table id="extensions">
<thead>
<tr>
<th><abbr title="Use checkbox to enable the extension; it will be enabled or disabled when you click apply button">Extension</abbr></th>
<th>URL</th>
<th><abbr title="Use checkbox to mark the extension for update; it will be updated when you click apply button">Update</abbr></th>
</tr>
</thead>
<tbody>
"""
for ext in extensions.extensions:
if ext.can_update:
ext_status = f"""<label><input class="gr-check-radio gr-checkbox" name="update_{html.escape(ext.name)}" checked="checked" type="checkbox">{html.escape(ext.status)}</label>"""
else:
ext_status = ext.status
code += f"""
<tr>
<td><label><input class="gr-check-radio gr-checkbox" name="enable_{html.escape(ext.name)}" type="checkbox" {'checked="checked"' if ext.enabled else ''}>{html.escape(ext.name)}</label></td>
<td><a href="{html.escape(ext.remote or '')}">{html.escape(ext.remote or '')}</a></td>
<td{' class="extension_status"' if ext.remote is not None else ''}>{ext_status}</td>
</tr>
"""
code += """
</tbody>
</table>
"""
return code
def normalize_git_url(url):
if url is None:
return ""
url = url.replace(".git", "")
return url
def install_extension_from_url(dirname, url):
check_access()
assert url, 'No URL specified'
if dirname is None or dirname == "":
*parts, last_part = url.split('/')
last_part = normalize_git_url(last_part)
dirname = last_part
target_dir = os.path.join(extensions.extensions_dir, dirname)
assert not os.path.exists(target_dir), f'Extension directory already exists: {target_dir}'
normalized_url = normalize_git_url(url)
assert len([x for x in extensions.extensions if normalize_git_url(x.remote) == normalized_url]) == 0, 'Extension with this URL is already installed'
tmpdir = os.path.join(paths.script_path, "tmp", dirname)
try:
shutil.rmtree(tmpdir, True)
repo = git.Repo.clone_from(url, tmpdir)
repo.remote().fetch()
os.rename(tmpdir, target_dir)
extensions.list_extensions()
return [extension_table(), html.escape(f"Installed into {target_dir}. Use Installed tab to restart.")]
finally:
shutil.rmtree(tmpdir, True)
def install_extension_from_index(url):
ext_table, message = install_extension_from_url(None, url)
return refresh_available_extensions_from_data(), ext_table, message
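Installation clones the repository into a temporary directory first and only moves it into extensions/ once the clone succeeds, and duplicate detection compares remotes after normalize_git_url has stripped the .git suffix. A minimal usage sketch with a hypothetical URL:
    # hypothetical URL; any git remote behaves the same way
    normalize_git_url("https://example.com/someone/some-extension.git")
    # -> "https://example.com/someone/some-extension"

    table_html, message = install_extension_from_url(
        dirname="",                                           # empty: directory name is derived from the last URL segment
        url="https://example.com/someone/some-extension.git",
    )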
def refresh_available_extensions(url):
global available_extensions
import urllib.request
with urllib.request.urlopen(url) as response:
text = response.read()
available_extensions = json.loads(text)
return url, refresh_available_extensions_from_data(), ''
def refresh_available_extensions_from_data():
extlist = available_extensions["extensions"]
installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions}
code = f"""<!-- {time.time()} -->
<table id="available_extensions">
<thead>
<tr>
<th>Extension</th>
<th>Description</th>
<th>Action</th>
</tr>
</thead>
<tbody>
"""
for ext in extlist:
name = ext.get("name", "noname")
url = ext.get("url", None)
description = ext.get("description", "")
if url is None:
continue
existing = installed_extension_urls.get(normalize_git_url(url), None)
install_code = f"""<input onclick="install_extension_from_index(this, '{html.escape(url)}')" type="button" value="{"Install" if not existing else "Installed"}" {"disabled=disabled" if existing else ""} class="gr-button gr-button-lg gr-button-secondary">"""
code += f"""
<tr>
<td><a href="{html.escape(url)}">{html.escape(name)}</a></td>
<td>{html.escape(description)}</td>
<td>{install_code}</td>
</tr>
"""
code += """
</tbody>
</table>
"""
return code
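refresh_available_extensions simply downloads a JSON document and hands it to the renderer above, so the index only needs a top-level "extensions" list; an illustrative payload matching the keys this code reads (entries without a url are skipped):
    # example index data, matching what refresh_available_extensions_from_data expects
    available_extensions_example = {
        "extensions": [
            {
                "name": "some-extension",                      # falls back to "noname" when missing
                "url": "https://example.com/someone/some-extension",
                "description": "short description shown in the Available table",
            },
        ]
    }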
def create_ui():
import modules.ui
with gr.Blocks(analytics_enabled=False) as ui:
with gr.Tabs(elem_id="tabs_extensions") as tabs:
with gr.TabItem("Installed"):
with gr.Row():
apply = gr.Button(value="Apply and restart UI", variant="primary")
check = gr.Button(value="Check for updates")
extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False)
extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False)
extensions_table = gr.HTML(lambda: extension_table())
apply.click(
fn=apply_and_restart,
_js="extensions_apply",
inputs=[extensions_disabled_list, extensions_update_list],
outputs=[],
)
check.click(
fn=check_updates,
_js="extensions_check",
inputs=[],
outputs=[extensions_table],
)
with gr.TabItem("Available"):
with gr.Row():
refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary")
available_extensions_index = gr.Text(value="https://raw.githubusercontent.com/wiki/AUTOMATIC1111/stable-diffusion-webui/Extensions-index.md", label="Extension index URL").style(container=False)
extension_to_install = gr.Text(elem_id="extension_to_install", visible=False)
install_extension_button = gr.Button(elem_id="install_extension_button", visible=False)
install_result = gr.HTML()
available_extensions_table = gr.HTML()
refresh_available_extensions_button.click(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update()]),
inputs=[available_extensions_index],
outputs=[available_extensions_index, available_extensions_table, install_result],
)
install_extension_button.click(
fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]),
inputs=[extension_to_install],
outputs=[available_extensions_table, extensions_table, install_result],
)
with gr.TabItem("Install from URL"):
install_url = gr.Text(label="URL for extension's git repository")
install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto")
install_button = gr.Button(value="Install", variant="primary")
install_result = gr.HTML(elem_id="extension_install_result")
install_button.click(
fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]),
inputs=[install_dirname, install_url],
outputs=[extensions_table, install_result],
)
return ui

View File

@ -12,7 +12,7 @@ opencv-python
requests
piexif
Pillow
pytorch_lightning
pytorch_lightning==1.7.7
realesrgan
scikit-image>=0.19
timm==0.4.12
@ -26,3 +26,4 @@ torchdiffeq
kornia
lark
inflection
GitPython

View File

@ -23,3 +23,4 @@ torchdiffeq==0.2.3
kornia==0.6.7
lark==1.1.2
inflection==0.5.1
GitPython==3.1.27

View File

@ -96,6 +96,7 @@ class Script(scripts.Script):
def ui(self, is_img2img):
checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False)
checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False)
prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1)
file = gr.File(label="Upload prompt inputs", type='bytes')
@ -106,9 +107,9 @@ class Script(scripts.Script):
# We don't shrink back to 1, because that causes the control to ignore [enter], and it may
# be unclear to the user that shift-enter is needed.
prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt])
return [checkbox_iterate, file, prompt_txt]
return [checkbox_iterate, checkbox_iterate_batch, file, prompt_txt]
def run(self, p, checkbox_iterate, file, prompt_txt: str):
def run(self, p, checkbox_iterate, checkbox_iterate_batch, file, prompt_txt: str):
lines = [x.strip() for x in prompt_txt.splitlines()]
lines = [x for x in lines if len(x) > 0]
@ -137,7 +138,7 @@ class Script(scripts.Script):
jobs.append(args)
print(f"Will process {len(lines)} lines in {job_count} jobs.")
if (checkbox_iterate and p.seed == -1):
if (checkbox_iterate or checkbox_iterate_batch) and p.seed == -1:
p.seed = int(random.randrange(4294967294))
state.job_count = job_count
@ -153,7 +154,7 @@ class Script(scripts.Script):
proc = process_images(copy_p)
images += proc.images
if (checkbox_iterate):
if checkbox_iterate:
p.seed = p.seed + (p.batch_size * p.n_iter)
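The two checkboxes now differ only in how the seed evolves: both fix a random seed up front when the user left it at -1, but only "Iterate seed every line" advances it between lines. A small illustration of the arithmetic with hypothetical numbers:
    # hypothetical values mirroring the two branches above
    seed, batch_size, n_iter = 1234, 2, 1
    # "Iterate seed every line": each new line starts at seed + batch_size * n_iter
    seed = seed + batch_size * n_iter      # 1234 -> 1236 -> 1238 -> ...
    # "Use same random seed for all lines": the seed picked up front is reused unchanged for every line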

View File

@ -153,7 +153,6 @@ def str_permutations(x):
"""dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
return x
AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value", "confirm"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value", "confirm"])
@ -178,6 +177,7 @@ axis_options = [
AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None),
AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None),
AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None),
AxisOption("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight"), format_value_add_label, None),
]

View File

@ -314,8 +314,8 @@ input[type="range"]{
.modalControls {
display: grid;
grid-template-columns: 32px auto 1fr 32px;
grid-template-areas: "zoom tile space close";
grid-template-columns: 32px 32px 32px 1fr 32px;
grid-template-areas: "zoom tile save space close";
position: absolute;
top: 0;
left: 0;
@ -333,6 +333,10 @@ input[type="range"]{
grid-area: zoom;
}
.modalSave {
grid-area: save;
}
.modalTileImage {
grid-area: tile;
}
@ -346,8 +350,18 @@ input[type="range"]{
cursor: pointer;
}
.modalSave {
color: white;
font-size: 28px;
margin-top: 8px;
font-weight: bold;
cursor: pointer;
}
.modalClose:hover,
.modalClose:focus,
.modalSave:hover,
.modalSave:focus,
.modalZoom:hover,
.modalZoom:focus {
color: #999;
@ -516,24 +530,52 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h
min-height: 480px !important;
}
/* Extensions */
#tab_extensions table{
border-collapse: collapse;
}
#tab_extensions table td, #tab_extensions table th{
border: 1px solid #ccc;
padding: 0.25em 0.5em;
}
#tab_extensions table input[type="checkbox"]{
margin-right: 0.5em;
}
#tab_extensions button{
max-width: 16em;
}
#tab_extensions input[disabled="disabled"]{
opacity: 0.5;
}
/* The following handles localization for right-to-left (RTL) languages like Arabic.
The rtl media type will only be activated by the logic in javascript/localization.js.
If you change anything above, you need to make sure it is RTL compliant by just running
your changes through converters like https://cssjanus.github.io/ or https://rtlcss.com/.
Then, you will need to add the RTL counterpart only if needed in the rtl section below.*/
@media rtl {
/* this part was manualy added */
/* this part was added manually */
:host {
direction: rtl;
}
.output-html:has(.performance), .gr-text-input {
select, .file-preview, .gr-text-input, .output-html:has(.performance), #ti_progress {
direction: ltr;
}
#script_list > label > select,
#x_type > label > select,
#y_type > label > select {
direction: rtl;
}
.gr-radio, .gr-checkbox{
margin-left: 0.25em;
}
/* this part was automatically generated with few manual modifications */
/* automatically generated with few manual modifications */
.performance .time {
margin-right: unset;
margin-left: 0;

0
test/__init__.py Normal file
View File

29
test/extras_test.py Normal file
View File

@ -0,0 +1,29 @@
import unittest
class TestExtrasWorking(unittest.TestCase):
def setUp(self):
self.url_img2img = "http://localhost:7860/sdapi/v1/extra-single-image"
self.simple_extras = {
"resize_mode": 0,
"show_extras_results": True,
"gfpgan_visibility": 0,
"codeformer_visibility": 0,
"codeformer_weight": 0,
"upscaling_resize": 2,
"upscaling_resize_w": 512,
"upscaling_resize_h": 512,
"upscaling_crop": True,
"upscaler_1": "None",
"upscaler_2": "None",
"extras_upscaler_2_visibility": 0,
"image": ""
}
class TestExtrasCorrectness(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()

59
test/img2img_test.py Normal file
View File

@ -0,0 +1,59 @@
import unittest
import requests
from gradio.processing_utils import encode_pil_to_base64
from PIL import Image
class TestImg2ImgWorking(unittest.TestCase):
def setUp(self):
self.url_img2img = "http://localhost:7860/sdapi/v1/img2img"
self.simple_img2img = {
"init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))],
"resize_mode": 0,
"denoising_strength": 0.75,
"mask": None,
"mask_blur": 4,
"inpainting_fill": 0,
"inpaint_full_res": False,
"inpaint_full_res_padding": 0,
"inpainting_mask_invert": 0,
"prompt": "example prompt",
"styles": [],
"seed": -1,
"subseed": -1,
"subseed_strength": 0,
"seed_resize_from_h": -1,
"seed_resize_from_w": -1,
"batch_size": 1,
"n_iter": 1,
"steps": 3,
"cfg_scale": 7,
"width": 64,
"height": 64,
"restore_faces": False,
"tiling": False,
"negative_prompt": "",
"eta": 0,
"s_churn": 0,
"s_tmax": 0,
"s_tmin": 0,
"s_noise": 1,
"override_settings": {},
"sampler_index": "Euler a",
"include_init_images": False
}
def test_img2img_simple_performed(self):
self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
def test_inpainting_masked_performed(self):
self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(r"test/test_files/mask_basic.png"))
self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)
class TestImg2ImgCorrectness(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()

19
test/server_poll.py Normal file
View File

@ -0,0 +1,19 @@
import unittest
import requests
import time
def run_tests():
timeout_threshold = 240
start_time = time.time()
while time.time()-start_time < timeout_threshold:
try:
requests.head("http://localhost:7860/")
break
except requests.exceptions.ConnectionError:
pass
if time.time()-start_time < timeout_threshold:
suite = unittest.TestLoader().discover('', pattern='*_test.py')
result = unittest.TextTestRunner(verbosity=2).run(suite)
else:
print("Launch unsuccessful")

Binary file not shown (new image, 9.7 KiB; likely test/test_files/img2img_basic.png, used by test/img2img_test.py).
Binary file not shown (new image, 362 B; likely test/test_files/mask_basic.png, used by test/img2img_test.py).

74
test/txt2img_test.py Normal file
View File

@ -0,0 +1,74 @@
import unittest
import requests
class TestTxt2ImgWorking(unittest.TestCase):
def setUp(self):
self.url_txt2img = "http://localhost:7860/sdapi/v1/txt2img"
self.simple_txt2img = {
"enable_hr": False,
"denoising_strength": 0,
"firstphase_width": 0,
"firstphase_height": 0,
"prompt": "example prompt",
"styles": [],
"seed": -1,
"subseed": -1,
"subseed_strength": 0,
"seed_resize_from_h": -1,
"seed_resize_from_w": -1,
"batch_size": 1,
"n_iter": 1,
"steps": 3,
"cfg_scale": 7,
"width": 64,
"height": 64,
"restore_faces": False,
"tiling": False,
"negative_prompt": "",
"eta": 0,
"s_churn": 0,
"s_tmax": 0,
"s_tmin": 0,
"s_noise": 1,
"sampler_index": "Euler a"
}
def test_txt2img_simple_performed(self):
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
def test_txt2img_with_negative_prompt_performed(self):
self.simple_txt2img["negative_prompt"] = "example negative prompt"
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
def test_txt2img_not_square_image_performed(self):
self.simple_txt2img["height"] = 128
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
def test_txt2img_with_hrfix_performed(self):
self.simple_txt2img["enable_hr"] = True
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
def test_txt2img_with_restore_faces_performed(self):
self.simple_txt2img["restore_faces"] = True
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
def test_txt2img_with_tiling_faces_performed(self):
self.simple_txt2img["tiling"] = True
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
def test_txt2img_with_vanilla_sampler_performed(self):
self.simple_txt2img["sampler_index"] = "PLMS"
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
def test_txt2img_multiple_batches_performed(self):
self.simple_txt2img["n_iter"] = 2
self.assertEqual(requests.post(self.url_txt2img, json=self.simple_txt2img).status_code, 200)
class TestTxt2ImgCorrectness(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main()

View File

@ -9,7 +9,7 @@ from fastapi.middleware.gzip import GZipMiddleware
from modules.paths import script_path
from modules import devices, sd_samplers, upscaler
from modules import devices, sd_samplers, upscaler, extensions
import modules.codeformer_model as codeformer
import modules.extras
import modules.face_restoration
@ -23,6 +23,7 @@ import modules.sd_hijack
import modules.sd_models
import modules.shared as shared
import modules.txt2img
import modules.script_callbacks
import modules.ui
from modules import devices
@ -46,26 +47,13 @@ def wrap_queued_call(func):
def wrap_gradio_gpu_call(func, extra_outputs=None):
def f(*args, **kwargs):
devices.torch_gc()
shared.state.sampling_step = 0
shared.state.job_count = -1
shared.state.job_no = 0
shared.state.job_timestamp = shared.state.get_job_timestamp()
shared.state.current_latent = None
shared.state.current_image = None
shared.state.current_image_sampling_step = 0
shared.state.skipped = False
shared.state.interrupted = False
shared.state.textinfo = None
shared.state.begin()
with queue_lock:
res = func(*args, **kwargs)
shared.state.job = ""
shared.state.job_count = 0
devices.torch_gc()
shared.state.end()
return res
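The block of per-field resets that used to live inside the GPU-call wrapper is replaced by shared.state.begin() and shared.state.end(); their implementation is not part of this hunk, but the lines removed here show roughly what they must cover. A hedged reconstruction that only mirrors the removed code (the real methods live in modules/shared.py and may also handle garbage collection):
    # hedged sketch of State.begin()/end(), inferred from the inline code removed above
    class State:
        def begin(self):
            self.sampling_step = 0
            self.job_count = -1
            self.job_no = 0
            self.job_timestamp = self.get_job_timestamp()
            self.current_latent = None
            self.current_image = None
            self.current_image_sampling_step = 0
            self.skipped = False
            self.interrupted = False
            self.textinfo = None

        def end(self):
            self.job = ""
            self.job_count = 0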
@ -73,6 +61,8 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
def initialize():
extensions.list_extensions()
if cmd_opts.ui_debug_mode:
shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
modules.scripts.load_scripts()
@ -88,7 +78,7 @@ def initialize():
modules.scripts.load_scripts()
modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
@ -105,15 +95,18 @@ def create_api(app):
api = Api(app, queue_lock)
return api
def wait_on_server(demo=None):
while 1:
time.sleep(0.5)
if demo and getattr(demo, 'do_restart', False):
if shared.state.need_restart:
shared.state.need_restart = False
time.sleep(0.5)
demo.close()
time.sleep(0.5)
break
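Restart requests are now signalled through shared state instead of an attribute attached to the Gradio demo object; both the Settings restart button and the extensions Apply-and-restart path use the same two calls, which this loop picks up:
    # how a restart is requested elsewhere in this commit (request_restart in ui.py, apply_and_restart in ui_extensions.py)
    shared.state.interrupt()           # stop any in-progress job
    shared.state.need_restart = True   # wait_on_server() sees the flag, closes the demo, and the outer loop restarts it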
def api_only():
initialize()
@ -145,14 +138,18 @@ def webui():
app.add_middleware(GZipMiddleware, minimum_size=1000)
if (launch_api):
if launch_api:
create_api(app)
modules.script_callbacks.app_started_callback(demo, app)
wait_on_server(demo)
sd_samplers.set_samplers()
print('Reloading Custom Scripts')
print('Reloading extensions')
extensions.list_extensions()
print('Reloading custom scripts')
modules.scripts.reload_scripts()
print('Reloading modules: modules.ui')
importlib.reload(modules.ui)
@ -161,8 +158,6 @@ def webui():
print('Restarting Gradio')
task = []
if __name__ == "__main__":
if cmd_opts.nowebui:
api_only()

View File

@ -102,15 +102,14 @@ then
exit 1
fi
printf "\n%s\n" "${delimiter}"
printf "Clone or update stable-diffusion-webui"
printf "\n%s\n" "${delimiter}"
cd "${install_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/, aborting...\e[0m" "${install_dir}"; exit 1; }
if [[ -d "${clone_dir}" ]]
then
cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
"${GIT}" pull
else
printf "\n%s\n" "${delimiter}"
printf "Clone stable-diffusion-webui"
printf "\n%s\n" "${delimiter}"
"${GIT}" clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git "${clone_dir}"
cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
fi