Merge branch 'dev' into patch-1

commit 993dd9a892
AUTOMATIC1111, 2023-07-08 16:50:23 +03:00, committed by GitHub
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
185 changed files with 8855 additions and 4785 deletions

.eslintignore (new file, 4 lines)

extensions
extensions-disabled
repositories
venv

.eslintrc.js (new file, 91 lines)

/* global module */
module.exports = {
    env: {
        browser: true,
        es2021: true,
    },
    extends: "eslint:recommended",
    parserOptions: {
        ecmaVersion: "latest",
    },
    rules: {
        "arrow-spacing": "error",
        "block-spacing": "error",
        "brace-style": "error",
        "comma-dangle": ["error", "only-multiline"],
        "comma-spacing": "error",
        "comma-style": ["error", "last"],
        "curly": ["error", "multi-line", "consistent"],
        "eol-last": "error",
        "func-call-spacing": "error",
        "function-call-argument-newline": ["error", "consistent"],
        "function-paren-newline": ["error", "consistent"],
        "indent": ["error", 4],
        "key-spacing": "error",
        "keyword-spacing": "error",
        "linebreak-style": ["error", "unix"],
        "no-extra-semi": "error",
        "no-mixed-spaces-and-tabs": "error",
        "no-multi-spaces": "error",
        "no-redeclare": ["error", {builtinGlobals: false}],
        "no-trailing-spaces": "error",
        "no-unused-vars": "off",
        "no-whitespace-before-property": "error",
        "object-curly-newline": ["error", {consistent: true, multiline: true}],
        "object-curly-spacing": ["error", "never"],
        "operator-linebreak": ["error", "after"],
        "quote-props": ["error", "consistent-as-needed"],
        "semi": ["error", "always"],
        "semi-spacing": "error",
        "semi-style": ["error", "last"],
        "space-before-blocks": "error",
        "space-before-function-paren": ["error", "never"],
        "space-in-parens": ["error", "never"],
        "space-infix-ops": "error",
        "space-unary-ops": "error",
        "switch-colon-spacing": "error",
        "template-curly-spacing": ["error", "never"],
        "unicode-bom": "error",
    },
    globals: {
        //script.js
        gradioApp: "readonly",
        executeCallbacks: "readonly",
        onAfterUiUpdate: "readonly",
        onOptionsChanged: "readonly",
        onUiLoaded: "readonly",
        onUiUpdate: "readonly",
        uiCurrentTab: "writable",
        uiElementInSight: "readonly",
        uiElementIsVisible: "readonly",
        //ui.js
        opts: "writable",
        all_gallery_buttons: "readonly",
        selected_gallery_button: "readonly",
        selected_gallery_index: "readonly",
        switch_to_txt2img: "readonly",
        switch_to_img2img_tab: "readonly",
        switch_to_img2img: "readonly",
        switch_to_sketch: "readonly",
        switch_to_inpaint: "readonly",
        switch_to_inpaint_sketch: "readonly",
        switch_to_extras: "readonly",
        get_tab_index: "readonly",
        create_submit_args: "readonly",
        restart_reload: "readonly",
        updateInput: "readonly",
        //extraNetworks.js
        requestGet: "readonly",
        popup: "readonly",
        // from python
        localization: "readonly",
        // progressbar.js
        randomId: "readonly",
        requestProgress: "readonly",
        // imageviewer.js
        modalPrevImage: "readonly",
        modalNextImage: "readonly",
        // token-counters.js
        setupTokenCounters: "readonly",
    }
};
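
Note: in CI (see the lint workflow below) this config is exercised with `npm i --ci` followed by `npm run lint`; running those same two commands locally should reproduce the check, assuming Node.js 18 as the workflow uses.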

.git-blame-ignore-revs (new file, 2 lines)

# Apply ESlint
9c54b78d9dde5601e916f308d9a9d6953ec39430
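
Note: git only honors this file when told to. Running `git config blame.ignoreRevsFile .git-blame-ignore-revs` (a standard git option, not something this commit sets up) makes `git blame` skip the listed ESLint reformatting commit when attributing lines.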

[bug report issue template]

@@ -43,10 +43,19 @@ body:
     - type: input
       id: commit
       attributes:
-        label: Commit where the problem happens
-        description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)
+        label: Version or Commit where the problem happens
+        description: "Which webui version or commit are you running ? (Do not write *Latest Version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Version: v1.2.3** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)"
       validations:
         required: true
+    - type: dropdown
+      id: py-version
+      attributes:
+        label: What Python version are you running on ?
+        multiple: false
+        options:
+          - Python 3.10.x
+          - Python 3.11.x (above, not supported yet)
+          - Python 3.9.x (below, not recommended)
     - type: dropdown
       id: platforms
       attributes:
@@ -59,6 +68,35 @@ body:
           - iOS
           - Android
           - Other/Cloud
+    - type: dropdown
+      id: device
+      attributes:
+        label: What device are you running WebUI on?
+        multiple: true
+        options:
+          - Nvidia GPUs (RTX 20 above)
+          - Nvidia GPUs (GTX 16 below)
+          - AMD GPUs (RX 6000 above)
+          - AMD GPUs (RX 5000 below)
+          - CPU
+          - Other GPUs
+    - type: dropdown
+      id: cross_attention_opt
+      attributes:
+        label: Cross attention optimization
+        description: What cross attention optimization are you using, Settings -> Optimizations -> Cross attention optimization
+        multiple: false
+        options:
+          - Automatic
+          - xformers
+          - sdp-no-mem
+          - sdp
+          - Doggettx
+          - V1
+          - InvokeAI
+          - "None "
+      validations:
+        required: true
     - type: dropdown
       id: browsers
       attributes:

[pull request template]

@@ -1,28 +1,15 @@
-# Please read the [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) before submitting a pull request!
-
-If you have a large change, pay special attention to this paragraph:
-
-> Before making changes, if you think that your feature will result in more than 100 lines changing, find me and talk to me about the feature you are proposing. It pains me to reject the hard work someone else did, but I won't add everything to the repo, and it's better if the rejection happens before you have to waste time working on the feature.
-
-Otherwise, after making sure you're following the rules described in wiki page, remove this section and continue on.
-
-**Describe what this pull request is trying to achieve.**
-
-A clear and concise description of what you're trying to accomplish with this, so your intent doesn't have to be extracted from your code.
-
-**Additional notes and description of your changes**
-
-More technical discussion about your changes go here, plus anything that a maintainer might have to specifically take a look at, or be wary of.
-
-**Environment this was tested in**
-
-List the environment you have developed / tested this on. As per the contributing page, changes should be able to work on Windows out of the box.
-- OS: [e.g. Windows, Linux]
-- Browser: [e.g. chrome, safari]
-- Graphics card: [e.g. NVIDIA RTX 2080 8GB, AMD RX 6600 8GB]
-
-**Screenshots or videos of your changes**
-
-If applicable, screenshots or a video showing off your changes. If it edits an existing UI, it should ideally contain a comparison of what used to be there, before your changes were made.
-
-This is **required** for anything that touches the user interface.
+## Description
+
+* a simple description of what you're trying to accomplish
+* a summary of changes in code
+* which issues it fixes, if any
+
+## Screenshots/videos:
+
+## Checklist:
+
+- [ ] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
+- [ ] I have performed a self-review of my own code
+- [ ] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style)
+- [ ] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests)

[CI workflow: lint on pull requests]

@@ -1,39 +1,34 @@
-# See https://github.com/actions/starter-workflows/blob/1067f16ad8a1eac328834e4b0ae24f7d206f810d/ci/pylint.yml for original reference file
 name: Run Linting/Formatting on Pull Requests

 on:
   - push
   - pull_request
-  # See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onpull_requestpull_request_targetbranchesbranches-ignore for syntax docs
-  # if you want to filter out branches, delete the `- pull_request` and uncomment these lines :
-  # pull_request:
-  #   branches:
-  #     - master
-  #   branches-ignore:
-  #     - development

 jobs:
-  lint:
+  lint-python:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Code
         uses: actions/checkout@v3
-      - name: Set up Python 3.10
-        uses: actions/setup-python@v4
+      - uses: actions/setup-python@v4
         with:
-          python-version: 3.10.6
-          cache: pip
-          cache-dependency-path: |
-            **/requirements*txt
-      - name: Install PyLint
-        run: |
-          python -m pip install --upgrade pip
-          pip install pylint
-      # This lets PyLint check to see if it can resolve imports
-      - name: Install dependencies
-        run: |
-          export COMMANDLINE_ARGS="--skip-torch-cuda-test --exit"
-          python launch.py
-      - name: Analysing the code with pylint
-        run: |
-          pylint $(git ls-files '*.py')
+          python-version: 3.11
+          # NB: there's no cache: pip here since we're not installing anything
+          # from the requirements.txt file(s) in the repository; it's faster
+          # not to have GHA download an (at the time of writing) 4 GB cache
+          # of PyTorch and other dependencies.
+      - name: Install Ruff
+        run: pip install ruff==0.0.272
+      - name: Run Ruff
+        run: ruff .
+  lint-js:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Code
+        uses: actions/checkout@v3
+      - name: Install Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: 18
+      - run: npm i --ci
+      - run: npm run lint
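
Note: the Python side of this check can be reproduced locally with `pip install ruff==0.0.272` and then `ruff .` from the repository root, mirroring the workflow steps above.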

[CI workflow: run tests]

@@ -17,13 +17,54 @@ jobs:
           cache: pip
           cache-dependency-path: |
             **/requirements*txt
+            launch.py
+      - name: Install test dependencies
+        run: pip install wait-for-it -r requirements-test.txt
+        env:
+          PIP_DISABLE_PIP_VERSION_CHECK: "1"
+          PIP_PROGRESS_BAR: "off"
+      - name: Setup environment
+        run: python launch.py --skip-torch-cuda-test --exit
+        env:
+          PIP_DISABLE_PIP_VERSION_CHECK: "1"
+          PIP_PROGRESS_BAR: "off"
+          TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu
+          WEBUI_LAUNCH_LIVE_OUTPUT: "1"
+          PYTHONUNBUFFERED: "1"
+      - name: Start test server
+        run: >
+          python -m coverage run
+          --data-file=.coverage.server
+          launch.py
+          --skip-prepare-environment
+          --skip-torch-cuda-test
+          --test-server
+          --no-half
+          --disable-opt-split-attention
+          --use-cpu all
+          --api-server-stop
+          2>&1 | tee output.txt &
       - name: Run tests
-        run: python launch.py --tests test --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test
-      - name: Upload main app stdout-stderr
+        run: |
+          wait-for-it --service 127.0.0.1:7860 -t 600
+          python -m pytest -vv --junitxml=test/results.xml --cov . --cov-report=xml --verify-base-url test
+      - name: Kill test server
+        if: always()
+        run: curl -vv -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop && sleep 10
+      - name: Show coverage
+        run: |
+          python -m coverage combine .coverage*
+          python -m coverage report -i
+          python -m coverage html -i
+      - name: Upload main app output
         uses: actions/upload-artifact@v3
         if: always()
         with:
-          name: stdout-stderr
-          path: |
-            test/stdout.txt
-            test/stderr.txt
+          name: output
+          path: output.txt
+      - name: Upload coverage HTML
+        uses: actions/upload-artifact@v3
+        if: always()
+        with:
+          name: htmlcov
+          path: htmlcov
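
Note: read as a recipe, the job above starts the webui as a CPU-only test server under coverage, waits for port 7860 with `wait-for-it --service 127.0.0.1:7860 -t 600`, runs pytest against it, and finally stops it through the API with `curl -XPOST http://127.0.0.1:7860/sdapi/v1/server-stop`; the same sequence should work locally, assuming `requirements-test.txt` is installed.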

.gitignore

@@ -34,3 +34,6 @@ notification.mp3
 /test/stderr.txt
 /cache.json*
 /config_states/
+/node_modules
+/package-lock.json
+/.coverage*

CHANGELOG.md

@@ -1,56 +1,193 @@
+## 1.4.0
+### Features:
+* zoom controls for inpainting
+* run basic torch calculation at startup in parallel to reduce the performance impact of first generation
+* option to pad prompt/neg prompt to be same length
+* remove taming_transformers dependency
+* custom k-diffusion scheduler settings
+* add an option to show selected settings in main txt2img/img2img UI
+* sysinfo tab in settings
+* infer styles from prompts when pasting params into the UI
+* an option to control the behavior of the above
+### Minor:
+* bump Gradio to 3.32.0
+* bump xformers to 0.0.20
+* Add option to disable token counters
+* tooltip fixes & optimizations
+* make it possible to configure filename for the zip download
+* `[vae_filename]` pattern for filenames
+* Revert discarding penultimate sigma for DPM-Solver++(2M) SDE
+* change UI reorder setting to multiselect
+* read version info from CHANGELOG.md if git version info is not available
+* link footer API to Wiki when API is not active
+* persistent conds cache (opt-in optimization)
+### Extensions:
+* After installing extensions, webui properly restarts the process rather than reloads the UI
+* Added VAE listing to web API. Via: /sdapi/v1/sd-vae
+* custom unet support
+* Add onAfterUiUpdate callback
+* refactor EmbeddingDatabase.register_embedding() to allow unregistering
+* add before_process callback for scripts
+* add ability for alwayson scripts to specify section and let user reorder those sections
+### Bug Fixes:
+* Fix dragging text to prompt
+* fix incorrect quoting for infotext values with colon in them
+* fix "hires. fix" prompt sharing same labels with txt2img_prompt
+* Fix s_min_uncond default type int
+* Fix for #10643 (Inpainting mask sometimes not working)
+* fix bad styling for thumbs view in extra networks #10639
+* fix for empty list of optimizations #10605
+* small fixes to prepare_tcmalloc for Debian/Ubuntu compatibility
+* fix --ui-debug-mode exit
+* patch GitPython to not use leaky persistent processes
+* fix duplicate Cross attention optimization after UI reload
+* torch.cuda.is_available() check for SdOptimizationXformers
+* fix hires fix using wrong conds in second pass if using Loras.
+* handle exception when parsing generation parameters from png info
+* fix upcast attention dtype error
+* forcing Torch Version to 1.13.1 for RX 5000 series GPUs
+* split mask blur into X and Y components, patch Outpainting MK2 accordingly
+* don't die when a LoRA is a broken symlink
+* allow activation of Generate Forever during generation
+## 1.3.2
+### Bug Fixes:
+* fix files served out of tmp directory even if they are saved to disk
+* fix postprocessing overwriting parameters
+## 1.3.1
+### Features:
+* revert default cross attention optimization to Doggettx
+### Bug Fixes:
+* fix bug: LoRA don't apply on dropdown list sd_lora
+* fix png info always added even if setting is not enabled
+* fix some fields not applying in xyz plot
+* fix "hires. fix" prompt sharing same labels with txt2img_prompt
+* fix lora hashes not being added properly to infotext if there is only one lora
+* fix --use-cpu failing to work properly at startup
+* make --disable-opt-split-attention command line option work again
+## 1.3.0
+### Features:
+* add UI to edit defaults
+* token merging (via dbolya/tomesd)
+* settings tab rework: add a lot of additional explanations and links
+* load extensions' Git metadata in parallel to loading the main program to save a ton of time during startup
+* update extensions table: show branch, show date in separate column, and show version from tags if available
+* TAESD - another option for cheap live previews
+* allow choosing sampler and prompts for second pass of hires fix - hidden by default, enabled in settings
+* calculate hashes for Lora
+* add lora hashes to infotext
+* when pasting infotext, use infotext's lora hashes to find local loras for `<lora:xxx:1>` entries whose hashes match loras the user has
+* select cross attention optimization from UI
+### Minor:
+* bump Gradio to 3.31.0
+* bump PyTorch to 2.0.1 for macOS and Linux AMD
+* allow setting defaults for elements in extensions' tabs
+* allow selecting file type for live previews
+* show "Loading..." for extra networks when displaying for the first time
+* suppress ENSD infotext for samplers that don't use it
+* clientside optimizations
+* add options to show/hide hidden files and dirs in extra networks, and to not list models/files in hidden directories
+* allow whitespace in styles.csv
+* add option to reorder tabs
+* move some functionality (swap resolution and set seed to -1) to client
+* option to specify editor height for img2img
+* button to copy image resolution into img2img width/height sliders
+* switch from pyngrok to ngrok-py
+* lazy-load images in extra networks UI
+* set "Navigate image viewer with gamepad" option to false by default, by request
+* change upscalers to download models into user-specified directory (from commandline args) rather than the default models/<...>
+* allow hiding buttons in ui-config.json
+### Extensions:
+* add /sdapi/v1/script-info api
+* use Ruff to lint Python code
+* use ESlint to lint Javascript code
+* add/modify CFG callbacks for Self-Attention Guidance extension
+* add command and endpoint for graceful server stopping
+* add some locals (prompts/seeds/etc) from processing function into the Processing class as fields
+* rework quoting for infotext items that have commas in them to use JSON (should be backwards compatible except for cases where it didn't work previously)
+* add /sdapi/v1/refresh-loras api checkpoint post request
+* tests overhaul
+### Bug Fixes:
+* fix an issue preventing the program from starting if the user specifies a bad Gradio theme
+* fix broken prompts from file script
+* fix symlink scanning for extra networks
+* fix --data-dir ignored when launching via webui-user.bat COMMANDLINE_ARGS
+* allow web UI to be ran fully offline
+* fix inability to run with --freeze-settings
+* fix inability to merge checkpoint without adding metadata
+* fix extra networks' save preview image not adding infotext for jpeg/webm
+* remove blinking effect from text in hires fix and scale resolution preview
+* make links to `http://<...>.git` extensions work in the extension tab
+* fix bug with webui hanging at startup due to hanging git process
 ## 1.2.1
 ### Features:
-* add an option to always refer to lora by filenames
+* add an option to always refer to LoRA by filenames
 ### Bug Fixes:
-* never refer to lora by an alias if multiple loras have same alias or the alias is called none
+* never refer to LoRA by an alias if multiple LoRAs have same alias or the alias is called none
 * fix upscalers disappearing after the user reloads UI
-* allow bf16 in safe unpickler (resolves problems with loading some loras)
+* allow bf16 in safe unpickler (resolves problems with loading some LoRAs)
 * allow web UI to be ran fully offline
 * fix localizations not working
-* fix error for loras: 'LatentDiffusion' object has no attribute 'lora_layer_mapping'
+* fix error for LoRAs: `'LatentDiffusion' object has no attribute 'lora_layer_mapping'`
 ## 1.2.0
 ### Features:
-* do not wait for stable diffusion model to load at startup
+* do not wait for Stable Diffusion model to load at startup
-* add filename patterns: [denoising]
+* add filename patterns: `[denoising]`
-* directory hiding for extra networks: dirs starting with . will hide their cards on extra network tabs unless specifically searched for
+* directory hiding for extra networks: dirs starting with `.` will hide their cards on extra network tabs unless specifically searched for
-* Lora: for the `<...>` text in prompt, use name of Lora that is in the metdata of the file, if present, instead of filename (both can be used to activate lora)
+* LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metadata of the file, if present, instead of filename (both can be used to activate LoRA)
-* Lora: read infotext params from kohya-ss's extension parameters if they are present and if his extension is not active
+* LoRA: read infotext params from kohya-ss's extension parameters if they are present and if his extension is not active
-* Lora: Fix some Loras not working (ones that have 3x3 convolution layer)
+* LoRA: fix some LoRAs not working (ones that have 3x3 convolution layer)
-* Lora: add an option to use old method of applying loras (producing same results as with kohya-ss)
+* LoRA: add an option to use old method of applying LoRAs (producing same results as with kohya-ss)
 * add version to infotext, footer and console output when starting
 * add links to wiki for filename pattern settings
 * add extended info for quicksettings setting and use multiselect input instead of a text field
 ### Minor:
-* gradio bumped to 3.29.0
+* bump Gradio to 3.29.0
-* torch bumped to 2.0.1
+* bump PyTorch to 2.0.1
-* --subpath option for gradio for use with reverse proxy
+* `--subpath` option for gradio for use with reverse proxy
-* linux/OSX: use existing virtualenv if already active (the VIRTUAL_ENV environment variable)
+* Linux/macOS: use existing virtualenv if already active (the VIRTUAL_ENV environment variable)
-* possible frontend optimization: do not apply localizations if there are none
+* do not apply localizations if there are none (possible frontend optimization)
-* Add extra `None` option for VAE in XYZ plot
+* add extra `None` option for VAE in XYZ plot
 * print error to console when batch processing in img2img fails
 * create HTML for extra network pages only on demand
-* allow directories starting with . to still list their models for lora, checkpoints, etc
+* allow directories starting with `.` to still list their models for LoRA, checkpoints, etc
 * put infotext options into their own category in settings tab
 * do not show licenses page when user selects Show all pages in settings
 ### Extensions:
-* Tooltip localization support
+* tooltip localization support
-* Add api method to get LoRA models with prompt
+* add API method to get LoRA models with prompt
 ### Bug Fixes:
-* re-add /docs endpoint
+* re-add `/docs` endpoint
 * fix gamepad navigation
 * make the lightbox fullscreen image function properly
 * fix squished thumbnails in extras tab
 * keep "search" filter for extra networks when user refreshes the tab (previously it showed everthing after you refreshed)
 * fix webui showing the same image if you configure the generation to always save results into same file
 * fix bug with upscalers not working properly
-* Fix MPS on PyTorch 2.0.1, Intel Macs
+* fix MPS on PyTorch 2.0.1, Intel Macs
 * make it so that custom context menu from contextMenu.js only disappears after user's click, ignoring non-user click events
 * prevent Reload UI button/link from reloading the page when it's not yet ready
 * fix prompts from file script failing to read contents from a drag/drop file
@@ -58,20 +195,20 @@
 ## 1.1.1
 ### Bug Fixes:
-* fix an error that prevents running webui on torch<2.0 without --disable-safe-unpickle
+* fix an error that prevents running webui on PyTorch<2.0 without --disable-safe-unpickle
 ## 1.1.0
 ### Features:
-* switch to torch 2.0.0 (except for AMD GPUs)
+* switch to PyTorch 2.0.0 (except for AMD GPUs)
 * visual improvements to custom code scripts
-* add filename patterns: [clip_skip], [hasprompt<>], [batch_number], [generation_number]
+* add filename patterns: `[clip_skip]`, `[hasprompt<>]`, `[batch_number]`, `[generation_number]`
 * add support for saving init images in img2img, and record their hashes in infotext for reproducability
 * automatically select current word when adjusting weight with ctrl+up/down
 * add dropdowns for X/Y/Z plot
-* setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs
+* add setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs
 * support Gradio's theme API
 * use TCMalloc on Linux by default; possible fix for memory leaks
-* (optimization) option to remove negative conditioning at low sigma values #9177
+* add optimization option to remove negative conditioning at low sigma values #9177
 * embed model merge metadata in .safetensors file
 * extension settings backup/restore feature #9169
 * add "resize by" and "resize to" tabs to img2img
@@ -80,22 +217,22 @@
 * button to restore the progress from session lost / tab reload
 ### Minor:
-* gradio bumped to 3.28.1
+* bump Gradio to 3.28.1
-* in extra tab, change extras "scale to" to sliders
+* change "scale to" to sliders in Extras tab
 * add labels to tool buttons to make it possible to hide them
 * add tiled inference support for ScuNET
 * add branch support for extension installation
-* change linux installation script to insall into current directory rather than /home/username
+* change Linux installation script to install into current directory rather than `/home/username`
-* sort textual inversion embeddings by name (case insensitive)
+* sort textual inversion embeddings by name (case-insensitive)
 * allow styles.csv to be symlinked or mounted in docker
 * remove the "do not add watermark to images" option
 * make selected tab configurable with UI config
-* extra networks UI in now fixed height and scrollable
+* make the extra networks UI fixed height and scrollable
-* add disable_tls_verify arg for use with self-signed certs
+* add `disable_tls_verify` arg for use with self-signed certs
 ### Extensions:
-* Add reload callback
+* add reload callback
-* add is_hr_pass field for processing
+* add `is_hr_pass` field for processing
 ### Bug Fixes:
 * fix broken batch image processing on 'Extras/Batch Process' tab
@@ -111,10 +248,10 @@
 * one broken image in img2img batch won't stop all processing
 * fix image orientation bug in train/preprocess
 * fix Ngrok recreating tunnels every reload
-* fix --realesrgan-models-path and --ldsr-models-path not working
+* fix `--realesrgan-models-path` and `--ldsr-models-path` not working
-* fix --skip-install not working
+* fix `--skip-install` not working
-* outpainting Mk2 & Poorman should use the SAMPLE file format to save images, not GRID file format
+* use SAMPLE file format in Outpainting Mk2 & Poorman
-* do not fail all Loras if some have failed to load when making a picture
+* do not fail all LoRAs if some have failed to load when making a picture
 ## 1.0.0
 * everything

README.md

@@ -15,7 +15,7 @@ A browser interface based on Gradio library for Stable Diffusion.
 - Attention, specify parts of text that the model should pay more attention to
     - a man in a `((tuxedo))` - will pay more attention to tuxedo
     - a man in a `(tuxedo:1.21)` - alternative syntax
-    - select text and press `Ctrl+Up` or `Ctrl+Down` to automatically adjust attention to selected text (code contributed by anonymous user)
+    - select text and press `Ctrl+Up` or `Ctrl+Down` (or `Command+Up` or `Command+Down` if you're on a MacOS) to automatically adjust attention to selected text (code contributed by anonymous user)
 - Loopback, run img2img processing multiple times
 - X/Y/Z plot, a way to draw a 3 dimensional plot of images with different parameters
 - Textual Inversion
@@ -99,6 +99,12 @@ Alternatively, use online services (like Google Colab):
 - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)

+### Installation on Windows 10/11 with NVidia-GPUs using release package
+1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents.
+2. Run `update.bat`.
+3. Run `run.bat`.
+> For more details see [Install-and-Run-on-NVidia-GPUs](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs)
+
 ### Automatic Installation on Windows
 1. Install [Python 3.10.6](https://www.python.org/downloads/release/python-3106/) (Newer version of Python does not support torch), checking "Add Python to PATH".
 2. Install [git](https://git-scm.com/download/win).
@@ -158,5 +164,6 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al
 - Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. Efros (no star) - https://github.com/timothybrooks/instruct-pix2pix
 - Security advice - RyotaK
 - UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
+- TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd
 - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
 - (You)

ldsr_model_arch.py

@@ -88,7 +88,7 @@ class LDSR:
         x_t = None
         logs = None
-        for n in range(n_runs):
+        for _ in range(n_runs):
             if custom_shape is not None:
                 x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
                 x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
@@ -110,7 +110,6 @@ class LDSR:
         diffusion_steps = int(steps)
         eta = 1.0
-        down_sample_method = 'Lanczos'

         gc.collect()
         if torch.cuda.is_available:
@@ -158,7 +157,7 @@ class LDSR:

 def get_cond(selected_path):
-    example = dict()
+    example = {}
     up_f = 4
     c = selected_path.convert('RGB')
     c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
@@ -196,7 +195,7 @@ def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_s
 @torch.no_grad()
 def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
                               corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
-    log = dict()
+    log = {}

     z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
                                         return_first_stage_outputs=True,
@@ -244,7 +243,7 @@ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize
                 x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
                 log["sample_noquant"] = x_sample_noquant
                 log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
-            except:
+            except Exception:
                 pass

     log["sample"] = x_sample

[LDSR upscaler script]

@@ -1,13 +1,11 @@
 import os
-import sys
-import traceback
-from basicsr.utils.download_util import load_file_from_url
+from modules.modelloader import load_file_from_url
 from modules.upscaler import Upscaler, UpscalerData
 from ldsr_model_arch import LDSR
-from modules import shared, script_callbacks
-import sd_hijack_autoencoder, sd_hijack_ddpm_v1
+from modules import shared, script_callbacks, errors
+import sd_hijack_autoencoder  # noqa: F401
+import sd_hijack_ddpm_v1  # noqa: F401

 class UpscalerLDSR(Upscaler):
@@ -44,22 +42,17 @@ class UpscalerLDSR(Upscaler):
         if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
             model = local_safetensors_path
         else:
-            model = local_ckpt_path if local_ckpt_path is not None else load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="model.ckpt", progress=True)
+            model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt")
-        yaml = local_yaml_path if local_yaml_path is not None else load_file_from_url(url=self.yaml_url, model_dir=self.model_path, file_name="project.yaml", progress=True)
+        yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml")

-        try:
             return LDSR(model, yaml)
-        except Exception:
-            print("Error importing LDSR:", file=sys.stderr)
-            print(traceback.format_exc(), file=sys.stderr)
-            return None

     def do_upscale(self, img, path):
+        try:
             ldsr = self.load_model(path)
-        if ldsr is None:
-            print("NO LDSR!")
+        except Exception:
+            errors.report(f"Failed loading LDSR model {path}", exc_info=True)
             return img
         ddim_steps = shared.opts.ldsr_steps
         return ldsr.super_resolution(img, ddim_steps, self.scale)

sd_hijack_autoencoder.py

@@ -1,16 +1,21 @@
 # The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
 # The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
 # As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
+import numpy as np
 import torch
 import pytorch_lightning as pl
 import torch.nn.functional as F
 from contextlib import contextmanager
-from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
+from torch.optim.lr_scheduler import LambdaLR
+from ldm.modules.ema import LitEma
+from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
 from ldm.modules.diffusionmodules.model import Encoder, Decoder
 from ldm.util import instantiate_from_config
 import ldm.models.autoencoder
+from packaging import version

 class VQModel(pl.LightningModule):
     def __init__(self,
@@ -19,7 +24,7 @@ class VQModel(pl.LightningModule):
                  n_embed,
                  embed_dim,
                  ckpt_path=None,
-                 ignore_keys=[],
+                 ignore_keys=None,
                  image_key="image",
                  colorize_nlabels=None,
                  monitor=None,
@@ -57,7 +62,7 @@ class VQModel(pl.LightningModule):
             print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

         if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
+            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
         self.scheduler_config = scheduler_config
         self.lr_g_factor = lr_g_factor
@@ -76,18 +81,19 @@ class VQModel(pl.LightningModule):
                 if context is not None:
                     print(f"{context}: Restored training weights")

-    def init_from_ckpt(self, path, ignore_keys=list()):
+    def init_from_ckpt(self, path, ignore_keys=None):
         sd = torch.load(path, map_location="cpu")["state_dict"]
         keys = list(sd.keys())
         for k in keys:
-            for ik in ignore_keys:
+            for ik in ignore_keys or []:
                 if k.startswith(ik):
                     print("Deleting key {} from state_dict.".format(k))
                     del sd[k]
         missing, unexpected = self.load_state_dict(sd, strict=False)
         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
-        if len(missing) > 0:
+        if missing:
             print(f"Missing Keys: {missing}")
+        if unexpected:
             print(f"Unexpected Keys: {unexpected}")

     def on_train_batch_end(self, *args, **kwargs):
@@ -165,7 +171,7 @@ class VQModel(pl.LightningModule):
     def validation_step(self, batch, batch_idx):
         log_dict = self._validation_step(batch, batch_idx)
         with self.ema_scope():
-            log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
+            self._validation_step(batch, batch_idx, suffix="_ema")
         return log_dict

     def _validation_step(self, batch, batch_idx, suffix=""):
@@ -232,7 +238,7 @@ class VQModel(pl.LightningModule):
         return self.decoder.conv_out.weight

     def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
-        log = dict()
+        log = {}
         x = self.get_input(batch, self.image_key)
         x = x.to(self.device)
         if only_inputs:
@@ -249,7 +255,8 @@ class VQModel(pl.LightningModule):
         if plot_ema:
             with self.ema_scope():
                 xrec_ema, _ = self(x)
-                if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
+                if x.shape[1] > 3:
+                    xrec_ema = self.to_rgb(xrec_ema)
                 log["reconstructions_ema"] = xrec_ema
         return log
@@ -264,7 +271,7 @@ class VQModel(pl.LightningModule):

 class VQModelInterface(VQModel):
     def __init__(self, embed_dim, *args, **kwargs):
-        super().__init__(embed_dim=embed_dim, *args, **kwargs)
+        super().__init__(*args, embed_dim=embed_dim, **kwargs)
         self.embed_dim = embed_dim

     def encode(self, x):
@@ -282,5 +289,5 @@ class VQModelInterface(VQModel):
         dec = self.decoder(quant)
         return dec

-setattr(ldm.models.autoencoder, "VQModel", VQModel)
-setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface)
+ldm.models.autoencoder.VQModel = VQModel
+ldm.models.autoencoder.VQModelInterface = VQModelInterface
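
Note: the `ignore_keys=[]` to `ignore_keys=None` changes in this file (and in sd_hijack_ddpm_v1.py below) address the standard Python pitfall that a mutable default value is created once, at definition time, and shared across calls. A minimal sketch of the failure mode, with hypothetical names rather than code from this repo:

    def init_bad(key, ignore_keys=[]):        # one list object, reused on every call
        ignore_keys.append(key)
        return ignore_keys

    def init_good(key, ignore_keys=None):     # fresh list unless the caller provides one
        ignore_keys = ignore_keys or []
        ignore_keys.append(key)
        return ignore_keys

    print(init_bad("a"), init_bad("b"))       # ['a', 'b'] ['a', 'b'] - state leaked between calls
    print(init_good("a"), init_good("b"))     # ['a'] ['b']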

sd_hijack_ddpm_v1.py

@@ -48,7 +48,7 @@ class DDPMV1(pl.LightningModule):
                  beta_schedule="linear",
                  loss_type="l2",
                  ckpt_path=None,
-                 ignore_keys=[],
+                 ignore_keys=None,
                  load_only_unet=False,
                  monitor="val/loss",
                  use_ema=True,
@@ -100,7 +100,7 @@ class DDPMV1(pl.LightningModule):
         if monitor is not None:
             self.monitor = monitor
         if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
+            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)

         self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
                                linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
@@ -182,22 +182,22 @@ class DDPMV1(pl.LightningModule):
                 if context is not None:
                     print(f"{context}: Restored training weights")

-    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
+    def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
         sd = torch.load(path, map_location="cpu")
         if "state_dict" in list(sd.keys()):
             sd = sd["state_dict"]
         keys = list(sd.keys())
         for k in keys:
-            for ik in ignore_keys:
+            for ik in ignore_keys or []:
                 if k.startswith(ik):
                     print("Deleting key {} from state_dict.".format(k))
                     del sd[k]
         missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
             sd, strict=False)
         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
-        if len(missing) > 0:
+        if missing:
             print(f"Missing Keys: {missing}")
-        if len(unexpected) > 0:
+        if unexpected:
             print(f"Unexpected Keys: {unexpected}")

     def q_mean_variance(self, x_start, t):
@@ -375,7 +375,7 @@ class DDPMV1(pl.LightningModule):
     @torch.no_grad()
     def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
-        log = dict()
+        log = {}
         x = self.get_input(batch, self.first_stage_key)
         N = min(x.shape[0], N)
         n_row = min(x.shape[0], n_row)
@@ -383,7 +383,7 @@ class DDPMV1(pl.LightningModule):
         log["inputs"] = x

         # get diffusion row
-        diffusion_row = list()
+        diffusion_row = []
         x_start = x[:n_row]

         for t in range(self.num_timesteps):
@@ -444,13 +444,13 @@ class LatentDiffusionV1(DDPMV1):
             conditioning_key = None
         ckpt_path = kwargs.pop("ckpt_path", None)
         ignore_keys = kwargs.pop("ignore_keys", [])
-        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
+        super().__init__(*args, conditioning_key=conditioning_key, **kwargs)
         self.concat_mode = concat_mode
         self.cond_stage_trainable = cond_stage_trainable
         self.cond_stage_key = cond_stage_key
         try:
             self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
-        except:
+        except Exception:
             self.num_downs = 0
         if not scale_by_std:
             self.scale_factor = scale_factor
@@ -877,16 +877,6 @@ class LatentDiffusionV1(DDPMV1):
                 c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
         return self.p_losses(x, c, t, *args, **kwargs)

-    def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: move to dataset
-        def rescale_bbox(bbox):
-            x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
-            y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
-            w = min(bbox[2] / crop_coordinates[2], 1 - x0)
-            h = min(bbox[3] / crop_coordinates[3], 1 - y0)
-            return x0, y0, w, h
-
-        return [rescale_bbox(b) for b in bboxes]
-
     def apply_model(self, x_noisy, t, cond, return_ids=False):

         if isinstance(cond, dict):
@@ -1126,7 +1116,7 @@ class LatentDiffusionV1(DDPMV1):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
@@ -1157,8 +1147,10 @@ class LatentDiffusionV1(DDPMV1):
             if i % log_every_t == 0 or i == timesteps - 1:
                 intermediates.append(x0_partial)
-            if callback: callback(i)
-            if img_callback: img_callback(img, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(img, i)
         return img, intermediates

     @torch.no_grad()
@@ -1205,8 +1197,10 @@ class LatentDiffusionV1(DDPMV1):
             if i % log_every_t == 0 or i == timesteps - 1:
                 intermediates.append(img)
-            if callback: callback(i)
-            if img_callback: img_callback(img, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(img, i)

         if return_intermediates:
             return img, intermediates
@@ -1221,7 +1215,7 @@ class LatentDiffusionV1(DDPMV1):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
         return self.p_sample_loop(cond,
@@ -1253,7 +1247,7 @@ class LatentDiffusionV1(DDPMV1):
         use_ddim = ddim_steps is not None

-        log = dict()
+        log = {}
         z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                            return_first_stage_outputs=True,
                                            force_c_encode=True,
@@ -1280,7 +1274,7 @@ class LatentDiffusionV1(DDPMV1):
         if plot_diffusion_rows:
             # get diffusion row
-            diffusion_row = list()
+            diffusion_row = []
             z_start = z[:n_row]
             for t in range(self.num_timesteps):
                 if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
@@ -1322,7 +1316,7 @@ class LatentDiffusionV1(DDPMV1):
             if inpaint:
                 # make a simple center square
-                b, h, w = z.shape[0], z.shape[2], z.shape[3]
+                h, w = z.shape[2], z.shape[3]
                 mask = torch.ones(N, h, w).to(self.device)
                 # zeros will be filled in
                 mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
@@ -1424,10 +1418,10 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1):
     # TODO: move all layout-specific hacks to this class
     def __init__(self, cond_stage_key, *args, **kwargs):
         assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
-        super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
+        super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)

     def log_images(self, batch, N=8, *args, **kwargs):
-        logs = super().log_images(batch=batch, N=N, *args, **kwargs)
+        logs = super().log_images(*args, batch=batch, N=N, **kwargs)

         key = 'train' if self.training else 'validation'
         dset = self.trainer.datamodule.datasets[key]
@@ -1443,7 +1437,7 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1):
         logs['bbox_image'] = cond_img
         return logs

-setattr(ldm.models.diffusion.ddpm, "DDPMV1", DDPMV1)
-setattr(ldm.models.diffusion.ddpm, "LatentDiffusionV1", LatentDiffusionV1)
-setattr(ldm.models.diffusion.ddpm, "DiffusionWrapperV1", DiffusionWrapperV1)
-setattr(ldm.models.diffusion.ddpm, "Layout2ImgDiffusionV1", Layout2ImgDiffusionV1)
+ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
+ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
+ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
+ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
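
Note: the reordering of `super().__init__(conditioning_key=conditioning_key, *args, **kwargs)` to `super().__init__(*args, conditioning_key=conditioning_key, **kwargs)` avoids passing a keyword argument before `*args`, which breaks as soon as the unpacked positionals also reach that parameter. A minimal sketch with a hypothetical function, not code from this repo:

    def f(a, conditioning_key=None):
        return a, conditioning_key

    good = ("x",)
    print(f(conditioning_key="concat", *good))   # ('x', 'concat'): happens to work with one positional

    bad = ("x", "y")
    try:
        f(conditioning_key="concat", *bad)       # "y" also lands on conditioning_key
    except TypeError as err:
        print(err)                               # f() got multiple values for argument 'conditioning_key'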

View File

@ -0,0 +1,147 @@
# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
# where the license is as follows:
#
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE./
import torch
import torch.nn as nn
import numpy as np
from einops import rearrange
class VectorQuantizer2(nn.Module):
"""
Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
avoids costly matrix multiplications and allows for post-hoc remapping of indices.
"""
# NOTE: due to a bug the beta term was applied to the wrong term. for
# backwards compatibility we use the buggy version by default, but you can
# specify legacy=False to fix it.
def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
sane_index_shape=False, legacy=True):
super().__init__()
self.n_e = n_e
self.e_dim = e_dim
self.beta = beta
self.legacy = legacy
self.embedding = nn.Embedding(self.n_e, self.e_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
self.remap = remap
if self.remap is not None:
self.register_buffer("used", torch.tensor(np.load(self.remap)))
self.re_embed = self.used.shape[0]
self.unknown_index = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
self.unknown_index = self.re_embed
self.re_embed = self.re_embed + 1
print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices.")
else:
self.re_embed = n_e
self.sane_index_shape = sane_index_shape
def remap_to_used(self, inds):
ishape = inds.shape
assert len(ishape) > 1
inds = inds.reshape(ishape[0], -1)
used = self.used.to(inds)
match = (inds[:, :, None] == used[None, None, ...]).long()
new = match.argmax(-1)
unknown = match.sum(2) < 1
if self.unknown_index == "random":
new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
else:
new[unknown] = self.unknown_index
return new.reshape(ishape)
def unmap_to_all(self, inds):
ishape = inds.shape
assert len(ishape) > 1
inds = inds.reshape(ishape[0], -1)
used = self.used.to(inds)
if self.re_embed > self.used.shape[0]: # extra token
inds[inds >= self.used.shape[0]] = 0 # simply set to zero
back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
return back.reshape(ishape)
def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
assert rescale_logits is False, "Only for interface compatible with Gumbel"
assert return_logits is False, "Only for interface compatible with Gumbel"
# reshape z -> (batch, height, width, channel) and flatten
z = rearrange(z, 'b c h w -> b h w c').contiguous()
z_flattened = z.view(-1, self.e_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
min_encoding_indices = torch.argmin(d, dim=1)
z_q = self.embedding(min_encoding_indices).view(z.shape)
perplexity = None
min_encodings = None
# compute loss for embedding
if not self.legacy:
loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
torch.mean((z_q - z.detach()) ** 2)
else:
loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
z_q = z + (z_q - z).detach()
# reshape back to match original input shape
z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
if self.remap is not None:
min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
min_encoding_indices = self.remap_to_used(min_encoding_indices)
min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
if self.sane_index_shape:
min_encoding_indices = min_encoding_indices.reshape(
z_q.shape[0], z_q.shape[2], z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def get_codebook_entry(self, indices, shape):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
indices = indices.reshape(shape[0], -1) # add batch axis
indices = self.unmap_to_all(indices)
indices = indices.reshape(-1) # flatten again
# get quantized latent vectors
z_q = self.embedding(indices)
if shape is not None:
z_q = z_q.view(shape)
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q
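
A minimal usage sketch of the quantizer above (the class is VectorQuantizer2 in the taming-transformers source this file is vendored from; sizes and hyperparameters below are invented for illustration). The forward pass flattens the (batch, channel, height, width) input, picks each vector's nearest codebook entry via the expanded distance z^2 + e^2 - 2*e*z noted in the comment, and returns a straight-through quantized tensor of the input's shape:

    import torch

    quantizer = VectorQuantizer2(n_e=1024, e_dim=256, beta=0.25)  # illustrative values
    z = torch.randn(2, 256, 16, 16)            # (batch, channel, height, width), channel == e_dim
    z_q, loss, (_, _, indices) = quantizer(z)
    assert z_q.shape == z.shape                # straight-through output keeps the input shape
    assert int(indices.max()) < 1024           # indices address the n_e-entry codebook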

View File

@@ -9,19 +9,37 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
     def activate(self, p, params_list):
         additional = shared.opts.sd_lora

-        if additional != "None" and additional in lora.available_loras and len([x for x in params_list if x.items[0] == additional]) == 0:
+        if additional != "None" and additional in lora.available_loras and not any(x for x in params_list if x.items[0] == additional):
             p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
             params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))

         names = []
         multipliers = []
         for params in params_list:
-            assert len(params.items) > 0
+            assert params.items

             names.append(params.items[0])
             multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)

         lora.load_loras(names, multipliers)

+        if shared.opts.lora_add_hashes_to_infotext:
+            lora_hashes = []
+            for item in lora.loaded_loras:
+                shorthash = item.lora_on_disk.shorthash
+                if not shorthash:
+                    continue
+
+                alias = item.mentioned_name
+                if not alias:
+                    continue
+
+                alias = alias.replace(":", "").replace(",", "")
+                lora_hashes.append(f"{alias}: {shorthash}")
+
+            if lora_hashes:
+                p.extra_generation_params["Lora hashes"] = ", ".join(lora_hashes)
+
     def deactivate(self, p):
         pass
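
The "Lora hashes" value built above is a comma-separated list of "alias: shorthash" pairs that ends up in the image's generation-parameters text. With two loras mentioned in the prompt it would look roughly like this (names and hashes invented for illustration):

    Lora hashes: "myStyleLora: 1a2b3c4d5e6f, detailTweaker: a1b2c3d4e5f6"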

View File

@@ -1,10 +1,9 @@
-import glob
 import os
 import re
 import torch
 from typing import Union

-from modules import shared, devices, sd_models, errors, scripts
+from modules import shared, devices, sd_models, errors, scripts, sd_hijack, hashes

 metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
@@ -77,9 +76,9 @@ class LoraOnDisk:
         self.name = name
         self.filename = filename
         self.metadata = {}
+        self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"

-        _, ext = os.path.splitext(filename)
-        if ext.lower() == ".safetensors":
+        if self.is_safetensors:
             try:
                 self.metadata = sd_models.read_metadata_from_safetensors(filename)
             except Exception as e:
@@ -95,14 +94,43 @@ class LoraOnDisk:
         self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None)  # those are cover images and they are too big to display in UI as text
         self.alias = self.metadata.get('ss_output_name', self.name)

+        self.hash = None
+        self.shorthash = None
+        self.set_hash(
+            self.metadata.get('sshs_model_hash') or
+            hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or
+            ''
+        )
+
+    def set_hash(self, v):
+        self.hash = v
+        self.shorthash = self.hash[0:12]
+
+        if self.shorthash:
+            available_lora_hash_lookup[self.shorthash] = self
+
+    def read_hash(self):
+        if not self.hash:
+            self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')
+
+    def get_alias(self):
+        if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in forbidden_lora_aliases:
+            return self.name
+        else:
+            return self.alias
+

 class LoraModule:
-    def __init__(self, name):
+    def __init__(self, name, lora_on_disk: LoraOnDisk):
         self.name = name
+        self.lora_on_disk = lora_on_disk
         self.multiplier = 1.0
         self.modules = {}
         self.mtime = None

+        self.mentioned_name = None
+        """the text that was used to add lora to prompt - can be either name or an alias"""
+

 class LoraUpDownModule:
     def __init__(self):
@@ -127,11 +155,11 @@ def assign_lora_names_to_compvis_modules(sd_model):
     sd_model.lora_layer_mapping = lora_layer_mapping


-def load_lora(name, filename):
-    lora = LoraModule(name)
-    lora.mtime = os.path.getmtime(filename)
+def load_lora(name, lora_on_disk):
+    lora = LoraModule(name, lora_on_disk)
+    lora.mtime = os.path.getmtime(lora_on_disk.filename)

-    sd = sd_models.read_state_dict(filename)
+    sd = sd_models.read_state_dict(lora_on_disk.filename)

     # this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0
     if not hasattr(shared.sd_model, 'lora_layer_mapping'):
@@ -177,7 +205,7 @@ def load_lora(name, filename):
             else:
                 print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
                 continue
-            assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
+            raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}")

         with torch.no_grad():
             module.weight.copy_(weight)
@@ -189,10 +217,10 @@ def load_lora(name, filename):
         elif lora_key == "lora_down.weight":
             lora_module.down = module
         else:
-            assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha'
+            raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")

-    if len(keys_failed_to_match) > 0:
-        print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")
+    if keys_failed_to_match:
+        print(f"Failed to match keys when loading Lora {lora_on_disk.filename}: {keys_failed_to_match}")

     return lora
@@ -207,30 +235,41 @@ def load_loras(names, multipliers=None):
     loaded_loras.clear()

     loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
-    if any([x is None for x in loras_on_disk]):
+    if any(x is None for x in loras_on_disk):
         list_available_loras()

         loras_on_disk = [available_lora_aliases.get(name, None) for name in names]

+    failed_to_load_loras = []
+
     for i, name in enumerate(names):
         lora = already_loaded.get(name, None)
         lora_on_disk = loras_on_disk[i]

         if lora_on_disk is not None:
             if lora is None or os.path.getmtime(lora_on_disk.filename) > lora.mtime:
                 try:
-                    lora = load_lora(name, lora_on_disk.filename)
+                    lora = load_lora(name, lora_on_disk)
                 except Exception as e:
                     errors.display(e, f"loading Lora {lora_on_disk.filename}")
                     continue

+            lora.mentioned_name = name
+
+            lora_on_disk.read_hash()
+
         if lora is None:
+            failed_to_load_loras.append(name)
             print(f"Couldn't find Lora with name {name}")
             continue

         lora.multiplier = multipliers[i] if multipliers else 1.0
         loaded_loras.append(lora)

+    if failed_to_load_loras:
+        sd_hijack.model_hijack.comments.append("Failed to find Loras: " + ", ".join(failed_to_load_loras))
+

 def lora_calc_updown(lora, module, target):
     with torch.no_grad():
@@ -314,7 +353,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
             print(f'failed to calculate lora weights for layer {lora_layer_name}')

-    setattr(self, "lora_current_names", wanted_names)
+    self.lora_current_names = wanted_names


 def lora_forward(module, input, original_forward):
@@ -348,8 +387,8 @@ def lora_forward(module, input, original_forward):
 def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
-    setattr(self, "lora_current_names", ())
-    setattr(self, "lora_weights_backup", None)
+    self.lora_current_names = ()
+    self.lora_weights_backup = None


 def lora_Linear_forward(self, input):
@@ -398,17 +437,22 @@ def list_available_loras():
     available_loras.clear()
     available_lora_aliases.clear()
     forbidden_lora_aliases.clear()
-    forbidden_lora_aliases.update({"none": 1})
+    available_lora_hash_lookup.clear()
+    forbidden_lora_aliases.update({"none": 1, "Addams": 1})

     os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)

     candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
-    for filename in sorted(candidates, key=str.lower):
+    for filename in candidates:
         if os.path.isdir(filename):
             continue

         name = os.path.splitext(os.path.basename(filename))[0]
+        try:
             entry = LoraOnDisk(name, filename)
+        except OSError:  # should catch FileNotFoundError and PermissionError etc.
+            errors.report(f"Failed to load LoRA {name} from {filename}", exc_info=True)
+            continue

         available_loras[name] = entry
@@ -428,7 +472,7 @@ def infotext_pasted(infotext, params):
     added = []

-    for k, v in params.items():
+    for k in params:
         if not k.startswith("AddNet Model "):
             continue
@@ -452,8 +496,10 @@ def infotext_pasted(infotext, params):
     if added:
         params["Prompt"] += "\n" + "".join(added)


 available_loras = {}
 available_lora_aliases = {}
+available_lora_hash_lookup = {}
 forbidden_lora_aliases = {}
 loaded_loras = []
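
A note on the hash bookkeeping introduced above: set_hash() truncates the full sha256 to a 12-character shorthash and registers the object in available_lora_hash_lookup, which infotext pasting later uses to resolve a hash back to a local file. A self-contained sketch of that invariant (hash value invented):

    available_lora_hash_lookup = {}

    class FakeLoraOnDisk:                      # stand-in for LoraOnDisk, illustration only
        def set_hash(self, v):
            self.hash = v
            self.shorthash = self.hash[0:12]
            if self.shorthash:
                available_lora_hash_lookup[self.shorthash] = self

    entry = FakeLoraOnDisk()
    entry.set_hash("93c32f6e57e7" + "0" * 52)  # made-up 64-character sha256
    assert available_lora_hash_lookup["93c32f6e57e7"] is entry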

View File

@@ -1,3 +1,5 @@
+import re
+
 import torch
 import gradio as gr
 from fastapi import FastAPI
@@ -53,8 +55,9 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted)
 shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
-    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
-    "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
+    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras),
+    "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
+    "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
 }))
@@ -77,6 +80,37 @@ def api_loras(_: gr.Blocks, app: FastAPI):
     async def get_loras():
         return [create_lora_json(obj) for obj in lora.available_loras.values()]

+    @app.post("/sdapi/v1/refresh-loras")
+    async def refresh_loras():
+        return lora.list_available_loras()
+

 script_callbacks.on_app_started(api_loras)

+re_lora = re.compile("<lora:([^:]+):")
+
+
+def infotext_pasted(infotext, d):
+    hashes = d.get("Lora hashes")
+    if not hashes:
+        return
+
+    hashes = [x.strip().split(':', 1) for x in hashes.split(",")]
+    hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}
+
+    def lora_replacement(m):
+        alias = m.group(1)
+        shorthash = hashes.get(alias)
+        if shorthash is None:
+            return m.group(0)
+
+        lora_on_disk = lora.available_lora_hash_lookup.get(shorthash)
+        if lora_on_disk is None:
+            return m.group(0)
+
+        return f'<lora:{lora_on_disk.get_alias()}:'
+
+    d["Prompt"] = re.sub(re_lora, lora_replacement, d["Prompt"])
+
+
+script_callbacks.on_infotext_pasted(infotext_pasted)
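
The re_lora pattern matches the alias inside a <lora:alias:multiplier> tag, and lora_replacement swaps that alias for the local file's preferred alias whenever the pasted "Lora hashes" entry resolves through available_lora_hash_lookup. A reduced sketch of the same mechanics, with plain dicts standing in for the real lookups (aliases and hash invented):

    import re

    re_lora = re.compile("<lora:([^:]+):")
    pasted_hashes = {"oldAlias": "93c32f6e57e7"}        # parsed from a "Lora hashes" entry
    local_alias_by_hash = {"93c32f6e57e7": "newAlias"}  # available_lora_hash_lookup + get_alias()

    def lora_replacement(m):
        alias = local_alias_by_hash.get(pasted_hashes.get(m.group(1), ""))
        return f"<lora:{alias}:" if alias else m.group(0)

    prompt = "a photo <lora:oldAlias:0.8>"
    print(re.sub(re_lora, lora_replacement, prompt))    # -> a photo <lora:newAlias:0.8>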

View File

@@ -13,13 +13,10 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
         lora.list_available_loras()

     def list_items(self):
-        for name, lora_on_disk in lora.available_loras.items():
+        for index, (name, lora_on_disk) in enumerate(lora.available_loras.items()):
             path, ext = os.path.splitext(lora_on_disk.filename)

-            if shared.opts.lora_preferred_name == "Filename" or lora_on_disk.alias.lower() in lora.forbidden_lora_aliases:
-                alias = name
-            else:
-                alias = lora_on_disk.alias
+            alias = lora_on_disk.get_alias()

             yield {
                 "name": name,
@@ -30,6 +27,8 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
                 "prompt": json.dumps(f"<lora:{alias}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
                 "local_preview": f"{path}.{shared.opts.samples_format}",
                 "metadata": json.dumps(lora_on_disk.metadata, indent=4) if lora_on_disk.metadata else None,
+                "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
             }

     def allowed_directories_for_previews(self):

View File

@@ -1,19 +1,16 @@
-import os.path
 import sys
-import traceback

 import PIL.Image
 import numpy as np
 import torch
 from tqdm import tqdm

-from basicsr.utils.download_util import load_file_from_url

 import modules.upscaler
-from modules import devices, modelloader
-from scunet_model_arch import SCUNet as net
+from modules import devices, modelloader, script_callbacks, errors
+from scunet_model_arch import SCUNet
+from modules.modelloader import load_file_from_url
 from modules.shared import opts
-from modules import images
@@ -29,7 +26,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
         scalers = []
         add_model2 = True
         for file in model_paths:
-            if "http" in file:
+            if file.startswith("http"):
                 name = self.model_name
             else:
                 name = modelloader.friendly_name(file)
@@ -39,8 +36,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
                 scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
                 scalers.append(scaler_data)
             except Exception:
-                print(f"Error loading ScuNET model: {file}", file=sys.stderr)
-                print(traceback.format_exc(), file=sys.stderr)
+                errors.report(f"Error loading ScuNET model: {file}", exc_info=True)
         if add_model2:
             scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
             scalers.append(scaler_data2)
@@ -91,9 +87,10 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
         torch.cuda.empty_cache()

+        try:
             model = self.load_model(selected_file)
-        if model is None:
-            print(f"ScuNET: Unable to load model from {selected_file}", file=sys.stderr)
+        except Exception as e:
+            print(f"ScuNET: Unable to load model from {selected_file}: {e}", file=sys.stderr)
             return img

         device = devices.get_device_for('scunet')
@@ -121,20 +118,27 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
     def load_model(self, path: str):
         device = devices.get_device_for('scunet')
-        if "http" in path:
-            filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="%s.pth" % self.name,
-                                          progress=True)
+        if path.startswith("http"):
+            # TODO: this doesn't use `path` at all?
+            filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth")
         else:
             filename = path
-        if not os.path.exists(os.path.join(self.model_path, filename)) or filename is None:
-            print(f"ScuNET: Unable to load model from {filename}", file=sys.stderr)
-            return None
-
-        model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
+        model = SCUNet(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
         model.load_state_dict(torch.load(filename), strict=True)
         model.eval()
-        for k, v in model.named_parameters():
+        for _, v in model.named_parameters():
             v.requires_grad = False
         model = model.to(device)

         return model

+
+def on_ui_settings():
+    import gradio as gr
+    from modules import shared
+
+    shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling"))
+    shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam"))
+
+
+script_callbacks.on_ui_settings(on_ui_settings)
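
The two options registered above parameterize tiled inference: the upscaler slides a tile-sized window across the image, with neighbouring windows sharing overlap pixels so the seams can be blended away. Roughly, in terms of window start positions (pure arithmetic with invented sizes; the actual tiling loop lives in the upscaler code):

    tile, overlap, width = 256, 8, 1024
    stride = tile - overlap
    starts = list(range(0, width - tile + 1, stride))
    if starts[-1] != width - tile:
        starts.append(width - tile)            # keep the right edge covered
    print(starts)                              # -> [0, 248, 496, 744, 768]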

View File

@@ -61,7 +61,9 @@ class WMSA(nn.Module):
         Returns:
             output: tensor shape [b h w c]
         """
-        if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
+        if self.type != 'W':
+            x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
+
         x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
         h_windows = x.size(1)
         w_windows = x.size(2)
@@ -85,8 +87,9 @@ class WMSA(nn.Module):
         output = self.linear(output)
         output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)

-        if self.type != 'W': output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2),
-                                                 dims=(1, 2))
+        if self.type != 'W':
+            output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2))

         return output

     def relative_embedding(self):

View File

@@ -1,18 +1,17 @@
-import contextlib
-import os
+import sys

 import numpy as np
 import torch
 from PIL import Image
-from basicsr.utils.download_util import load_file_from_url
 from tqdm import tqdm

 from modules import modelloader, devices, script_callbacks, shared
-from modules.shared import cmd_opts, opts, state
-from swinir_model_arch import SwinIR as net
-from swinir_model_arch_v2 import Swin2SR as net2
+from modules.shared import opts, state
+from swinir_model_arch import SwinIR
+from swinir_model_arch_v2 import Swin2SR
 from modules.upscaler import Upscaler, UpscalerData

+SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth"

 device_swinir = devices.get_device_for('swinir')
@@ -20,16 +19,14 @@ device_swinir = devices.get_device_for('swinir')
 class UpscalerSwinIR(Upscaler):
     def __init__(self, dirname):
         self.name = "SwinIR"
-        self.model_url = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0" \
-                         "/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR" \
-                         "-L_x4_GAN.pth "
+        self.model_url = SWINIR_MODEL_URL
         self.model_name = "SwinIR 4x"
         self.user_path = dirname
         super().__init__()
         scalers = []
         model_files = self.find_models(ext_filter=[".pt", ".pth"])
         for model in model_files:
-            if "http" in model:
+            if model.startswith("http"):
                 name = self.model_name
             else:
                 name = modelloader.friendly_name(model)
@@ -38,27 +35,30 @@ class UpscalerSwinIR(Upscaler):
         self.scalers = scalers

     def do_upscale(self, img, model_file):
-        model = self.load_model(model_file)
-        if model is None:
+        try:
+            model = self.load_model(model_file)
+        except Exception as e:
+            print(f"Failed loading SwinIR model {model_file}: {e}", file=sys.stderr)
             return img
         model = model.to(device_swinir, dtype=devices.dtype)
         img = upscale(img, model)
         try:
             torch.cuda.empty_cache()
-        except:
+        except Exception:
             pass
         return img

     def load_model(self, path, scale=4):
-        if "http" in path:
-            dl_name = "%s%s" % (self.model_name.replace(" ", "_"), ".pth")
-            filename = load_file_from_url(url=path, model_dir=self.model_path, file_name=dl_name, progress=True)
+        if path.startswith("http"):
+            filename = modelloader.load_file_from_url(
+                url=path,
+                model_dir=self.model_download_path,
+                file_name=f"{self.model_name.replace(' ', '_')}.pth",
+            )
         else:
             filename = path
-        if filename is None or not os.path.exists(filename):
-            return None
         if filename.endswith(".v2.pth"):
-            model = net2(
+            model = Swin2SR(
                 upscale=scale,
                 in_chans=3,
                 img_size=64,
@@ -73,7 +73,7 @@ class UpscalerSwinIR(Upscaler):
             )
             params = None
         else:
-            model = net(
+            model = SwinIR(
                 upscale=scale,
                 in_chans=3,
                 img_size=64,

View File

@@ -644,7 +644,7 @@ class SwinIR(nn.Module):
     """

     def __init__(self, img_size=64, patch_size=1, in_chans=3,
-                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
+                 embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
                  window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                  drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                  norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
@@ -844,7 +844,7 @@ class SwinIR(nn.Module):
         H, W = self.patches_resolution
         flops += H * W * 3 * self.embed_dim * 9
         flops += self.patch_embed.flops()
-        for i, layer in enumerate(self.layers):
+        for layer in self.layers:
             flops += layer.flops()
         flops += H * W * 3 * self.embed_dim * self.embed_dim
         flops += self.upsample.flops()

View File

@@ -74,7 +74,7 @@ class WindowAttention(nn.Module):
     """

     def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.,
-                 pretrained_window_size=[0, 0]):
+                 pretrained_window_size=(0, 0)):

         super().__init__()
         self.dim = dim
@@ -698,7 +698,7 @@ class Swin2SR(nn.Module):
     """

     def __init__(self, img_size=64, patch_size=1, in_chans=3,
-                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
+                 embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
                  window_size=7, mlp_ratio=4., qkv_bias=True,
                  drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                  norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
@@ -994,7 +994,7 @@ class Swin2SR(nn.Module):
         H, W = self.patches_resolution
         flops += H * W * 3 * self.embed_dim * 9
         flops += self.patch_embed.flops()
-        for i, layer in enumerate(self.layers):
+        for layer in self.layers:
             flops += layer.flops()
         flops += H * W * 3 * self.embed_dim * self.embed_dim
         flops += self.upsample.flops()

View File

@@ -0,0 +1,776 @@
onUiLoaded(async() => {
const elementIDs = {
img2imgTabs: "#mode_img2img .tab-nav",
inpaint: "#img2maskimg",
inpaintSketch: "#inpaint_sketch",
rangeGroup: "#img2img_column_size",
sketch: "#img2img_sketch"
};
const tabNameToElementId = {
"Inpaint sketch": elementIDs.inpaintSketch,
"Inpaint": elementIDs.inpaint,
"Sketch": elementIDs.sketch
};
// Helper functions
// Get active tab
function getActiveTab(elements, all = false) {
const tabs = elements.img2imgTabs.querySelectorAll("button");
if (all) return tabs;
for (let tab of tabs) {
if (tab.classList.contains("selected")) {
return tab;
}
}
}
// Get tab ID
function getTabId(elements) {
const activeTab = getActiveTab(elements);
return tabNameToElementId[activeTab.innerText];
}
// Wait until opts loaded
async function waitForOpts() {
for (;;) {
if (window.opts && Object.keys(window.opts).length) {
return window.opts;
}
await new Promise(resolve => setTimeout(resolve, 100));
}
}
// Function for defining the "Ctrl", "Shift" and "Alt" keys
function isModifierKey(event, key) {
switch (key) {
case "Ctrl":
return event.ctrlKey;
case "Shift":
return event.shiftKey;
case "Alt":
return event.altKey;
default:
return false;
}
}
// Check if hotkey is valid
function isValidHotkey(value) {
const specialKeys = ["Ctrl", "Alt", "Shift", "Disable"];
return (
(typeof value === "string" &&
value.length === 1 &&
/[a-z]/i.test(value)) ||
specialKeys.includes(value)
);
}
// Normalize hotkey
function normalizeHotkey(hotkey) {
return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey;
}
// Format hotkey for display
function formatHotkeyForDisplay(hotkey) {
return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey;
}
// Create hotkey configuration with the provided options
function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) {
const result = {}; // Resulting hotkey configuration
const usedKeys = new Set(); // Set of used hotkeys
// Iterate through defaultHotkeysConfig keys
for (const key in defaultHotkeysConfig) {
const userValue = hotkeysConfigOpts[key]; // User-provided hotkey value
const defaultValue = defaultHotkeysConfig[key]; // Default hotkey value
// Apply appropriate value for undefined, boolean, or object userValue
if (
userValue === undefined ||
typeof userValue === "boolean" ||
typeof userValue === "object" ||
userValue === "disable"
) {
result[key] =
userValue === undefined ? defaultValue : userValue;
} else if (isValidHotkey(userValue)) {
const normalizedUserValue = normalizeHotkey(userValue);
// Check for conflicting hotkeys
if (!usedKeys.has(normalizedUserValue)) {
usedKeys.add(normalizedUserValue);
result[key] = normalizedUserValue;
} else {
console.error(
`Hotkey: ${formatHotkeyForDisplay(
userValue
)} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay(
defaultValue
)}`
);
result[key] = defaultValue;
}
} else {
console.error(
`Hotkey: ${formatHotkeyForDisplay(
userValue
)} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay(
defaultValue
)}`
);
result[key] = defaultValue;
}
}
return result;
}
// Disables functions in the config object based on the provided list of function names
function disableFunctions(config, disabledFunctions) {
// Bind the hasOwnProperty method to the functionMap object to avoid errors
const hasOwnProperty =
Object.prototype.hasOwnProperty.bind(functionMap);
// Loop through the disabledFunctions array and disable the corresponding functions in the config object
disabledFunctions.forEach(funcName => {
if (hasOwnProperty(funcName)) {
const key = functionMap[funcName];
config[key] = "disable";
}
});
// Return the updated config object
return config;
}
/**
* The restoreImgRedMask function displays a red mask around an image to indicate the aspect ratio.
* If the image display property is set to 'none', the mask breaks. To fix this, the function
* temporarily sets the display property to 'block' and then hides the mask again after 300 milliseconds
* to avoid breaking the canvas. Additionally, the function adjusts the mask to work correctly on
* very long images.
*/
function restoreImgRedMask(elements) {
const mainTabId = getTabId(elements);
if (!mainTabId) return;
const mainTab = gradioApp().querySelector(mainTabId);
const img = mainTab.querySelector("img");
const imageARPreview = gradioApp().querySelector("#imageARPreview");
if (!img || !imageARPreview) return;
imageARPreview.style.transform = "";
if (parseFloat(mainTab.style.width) > 865) {
const transformString = mainTab.style.transform;
const scaleMatch = transformString.match(
/scale\(([-+]?[0-9]*\.?[0-9]+)\)/
);
let zoom = 1; // default zoom
if (scaleMatch && scaleMatch[1]) {
zoom = Number(scaleMatch[1]);
}
imageARPreview.style.transformOrigin = "0 0";
imageARPreview.style.transform = `scale(${zoom})`;
}
if (img.style.display !== "none") return;
img.style.display = "block";
setTimeout(() => {
img.style.display = "none";
}, 400);
}
const hotkeysConfigOpts = await waitForOpts();
// Default config
const defaultHotkeysConfig = {
canvas_hotkey_zoom: "Alt",
canvas_hotkey_adjust: "Ctrl",
canvas_hotkey_reset: "KeyR",
canvas_hotkey_fullscreen: "KeyS",
canvas_hotkey_move: "KeyF",
canvas_hotkey_overlap: "KeyO",
canvas_disabled_functions: [],
canvas_show_tooltip: true,
canvas_blur_prompt: false
};
const functionMap = {
"Zoom": "canvas_hotkey_zoom",
"Adjust brush size": "canvas_hotkey_adjust",
"Moving canvas": "canvas_hotkey_move",
"Fullscreen": "canvas_hotkey_fullscreen",
"Reset Zoom": "canvas_hotkey_reset",
"Overlap": "canvas_hotkey_overlap"
};
// Loading the configuration from opts
const preHotkeysConfig = createHotkeyConfig(
defaultHotkeysConfig,
hotkeysConfigOpts
);
// Disable functions that are not needed by the user
const hotkeysConfig = disableFunctions(
preHotkeysConfig,
preHotkeysConfig.canvas_disabled_functions
);
let isMoving = false;
let mouseX, mouseY;
let activeElement;
const elements = Object.fromEntries(
Object.keys(elementIDs).map(id => [
id,
gradioApp().querySelector(elementIDs[id])
])
);
const elemData = {};
// Apply functionality to the range inputs. Restore redmask and correct for long images.
const rangeInputs = elements.rangeGroup ?
Array.from(elements.rangeGroup.querySelectorAll("input")) :
[
gradioApp().querySelector("#img2img_width input[type='range']"),
gradioApp().querySelector("#img2img_height input[type='range']")
];
for (const input of rangeInputs) {
input?.addEventListener("input", () => restoreImgRedMask(elements));
}
function applyZoomAndPan(elemId) {
const targetElement = gradioApp().querySelector(elemId);
if (!targetElement) {
console.log("Element not found");
return;
}
targetElement.style.transformOrigin = "0 0";
elemData[elemId] = {
zoom: 1,
panX: 0,
panY: 0
};
let fullScreenMode = false;
// Create tooltip
        function createTooltip() {
            const toolTipElement =
                targetElement.querySelector(".image-container");
const tooltip = document.createElement("div");
tooltip.className = "canvas-tooltip";
// Creating an item of information
const info = document.createElement("i");
info.className = "canvas-tooltip-info";
info.textContent = "";
// Create a container for the contents of the tooltip
const tooltipContent = document.createElement("div");
tooltipContent.className = "canvas-tooltip-content";
// Define an array with hotkey information and their actions
const hotkeysInfo = [
{
configKey: "canvas_hotkey_zoom",
action: "Zoom canvas",
keySuffix: " + wheel"
},
{
configKey: "canvas_hotkey_adjust",
action: "Adjust brush size",
keySuffix: " + wheel"
},
{configKey: "canvas_hotkey_reset", action: "Reset zoom"},
{
configKey: "canvas_hotkey_fullscreen",
action: "Fullscreen mode"
},
{configKey: "canvas_hotkey_move", action: "Move canvas"},
{configKey: "canvas_hotkey_overlap", action: "Overlap"}
];
// Create hotkeys array with disabled property based on the config values
const hotkeys = hotkeysInfo.map(info => {
const configValue = hotkeysConfig[info.configKey];
const key = info.keySuffix ?
`${configValue}${info.keySuffix}` :
configValue.charAt(configValue.length - 1);
return {
key,
action: info.action,
disabled: configValue === "disable"
};
});
for (const hotkey of hotkeys) {
if (hotkey.disabled) {
continue;
}
const p = document.createElement("p");
p.innerHTML = `<b>${hotkey.key}</b> - ${hotkey.action}`;
tooltipContent.appendChild(p);
}
// Add information and content elements to the tooltip element
tooltip.appendChild(info);
tooltip.appendChild(tooltipContent);
// Add a hint element to the target element
            toolTipElement.appendChild(tooltip);
}
//Show tool tip if setting enable
if (hotkeysConfig.canvas_show_tooltip) {
createTooltip();
}
// In the course of research, it was found that the tag img is very harmful when zooming and creates white canvases. This hack allows you to almost never think about this problem, it has no effect on webui.
function fixCanvas() {
const activeTab = getActiveTab(elements).textContent.trim();
if (activeTab !== "img2img") {
const img = targetElement.querySelector(`${elemId} img`);
if (img && img.style.display !== "none") {
img.style.display = "none";
img.style.visibility = "hidden";
}
}
}
// Reset the zoom level and pan position of the target element to their initial values
function resetZoom() {
elemData[elemId] = {
zoomLevel: 1,
panX: 0,
panY: 0
};
fixCanvas();
targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
const canvas = gradioApp().querySelector(
`${elemId} canvas[key="interface"]`
);
toggleOverlap("off");
fullScreenMode = false;
if (
canvas &&
parseFloat(canvas.style.width) > 865 &&
parseFloat(targetElement.style.width) > 865
) {
fitToElement();
return;
}
targetElement.style.width = "";
if (canvas) {
targetElement.style.height = canvas.style.height;
}
}
// Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
function toggleOverlap(forced = "") {
const zIndex1 = "0";
const zIndex2 = "998";
targetElement.style.zIndex =
targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
if (forced === "off") {
targetElement.style.zIndex = zIndex1;
} else if (forced === "on") {
targetElement.style.zIndex = zIndex2;
}
}
// Adjust the brush size based on the deltaY value from a mouse wheel event
function adjustBrushSize(
elemId,
deltaY,
withoutValue = false,
percentage = 5
) {
const input =
gradioApp().querySelector(
`${elemId} input[aria-label='Brush radius']`
) ||
gradioApp().querySelector(
`${elemId} button[aria-label="Use brush"]`
);
if (input) {
input.click();
if (!withoutValue) {
const maxValue =
parseFloat(input.getAttribute("max")) || 100;
const changeAmount = maxValue * (percentage / 100);
const newValue =
parseFloat(input.value) +
(deltaY > 0 ? -changeAmount : changeAmount);
input.value = Math.min(Math.max(newValue, 0), maxValue);
input.dispatchEvent(new Event("change"));
}
}
}
// Reset zoom when uploading a new image
const fileInput = gradioApp().querySelector(
`${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
);
fileInput.addEventListener("click", resetZoom);
// Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
function updateZoom(newZoomLevel, mouseX, mouseY) {
newZoomLevel = Math.max(0.5, Math.min(newZoomLevel, 15));
elemData[elemId].panX +=
mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel;
elemData[elemId].panY +=
mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel;
targetElement.style.transformOrigin = "0 0";
targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
toggleOverlap("on");
return newZoomLevel;
}
// Change the zoom level based on user interaction
function changeZoomLevel(operation, e) {
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) {
e.preventDefault();
let zoomPosX, zoomPosY;
let delta = 0.2;
if (elemData[elemId].zoomLevel > 7) {
delta = 0.9;
} else if (elemData[elemId].zoomLevel > 2) {
delta = 0.6;
}
zoomPosX = e.clientX;
zoomPosY = e.clientY;
fullScreenMode = false;
elemData[elemId].zoomLevel = updateZoom(
elemData[elemId].zoomLevel +
(operation === "+" ? delta : -delta),
zoomPosX - targetElement.getBoundingClientRect().left,
zoomPosY - targetElement.getBoundingClientRect().top
);
}
}
        /**
         * This function fits the target element to its parent container by
         * calculating the required scale and offsets. It also updates the
         * element's stored zoomLevel, panX, and panY to reflect the new state.
         */
function fitToElement() {
//Reset Zoom
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
// Get element and screen dimensions
const elementWidth = targetElement.offsetWidth;
const elementHeight = targetElement.offsetHeight;
const parentElement = targetElement.parentElement;
const screenWidth = parentElement.clientWidth;
const screenHeight = parentElement.clientHeight;
// Get element's coordinates relative to the parent element
const elementRect = targetElement.getBoundingClientRect();
const parentRect = parentElement.getBoundingClientRect();
const elementX = elementRect.x - parentRect.x;
// Calculate scale and offsets
const scaleX = screenWidth / elementWidth;
const scaleY = screenHeight / elementHeight;
const scale = Math.min(scaleX, scaleY);
const transformOrigin =
window.getComputedStyle(targetElement).transformOrigin;
const [originX, originY] = transformOrigin.split(" ");
const originXValue = parseFloat(originX);
const originYValue = parseFloat(originY);
const offsetX =
(screenWidth - elementWidth * scale) / 2 -
originXValue * (1 - scale);
const offsetY =
(screenHeight - elementHeight * scale) / 2.5 -
originYValue * (1 - scale);
// Apply scale and offsets to the element
targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
// Update global variables
elemData[elemId].zoomLevel = scale;
elemData[elemId].panX = offsetX;
elemData[elemId].panY = offsetY;
fullScreenMode = false;
toggleOverlap("off");
}
/**
* This function fits the target element to the screen by calculating
* the required scale and offsets. It also updates the global variables
* zoomLevel, panX, and panY to reflect the new state.
*/
// Fullscreen mode
function fitToScreen() {
const canvas = gradioApp().querySelector(
`${elemId} canvas[key="interface"]`
);
if (!canvas) return;
if (canvas.offsetWidth > 862) {
targetElement.style.width = canvas.offsetWidth + "px";
}
if (fullScreenMode) {
resetZoom();
fullScreenMode = false;
return;
}
//Reset Zoom
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
// Get scrollbar width to right-align the image
const scrollbarWidth =
window.innerWidth - document.documentElement.clientWidth;
// Get element and screen dimensions
const elementWidth = targetElement.offsetWidth;
const elementHeight = targetElement.offsetHeight;
const screenWidth = window.innerWidth - scrollbarWidth;
const screenHeight = window.innerHeight;
// Get element's coordinates relative to the page
const elementRect = targetElement.getBoundingClientRect();
const elementY = elementRect.y;
const elementX = elementRect.x;
// Calculate scale and offsets
const scaleX = screenWidth / elementWidth;
const scaleY = screenHeight / elementHeight;
const scale = Math.min(scaleX, scaleY);
// Get the current transformOrigin
const computedStyle = window.getComputedStyle(targetElement);
const transformOrigin = computedStyle.transformOrigin;
const [originX, originY] = transformOrigin.split(" ");
const originXValue = parseFloat(originX);
const originYValue = parseFloat(originY);
// Calculate offsets with respect to the transformOrigin
const offsetX =
(screenWidth - elementWidth * scale) / 2 -
elementX -
originXValue * (1 - scale);
const offsetY =
(screenHeight - elementHeight * scale) / 2 -
elementY -
originYValue * (1 - scale);
// Apply scale and offsets to the element
targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
// Update global variables
elemData[elemId].zoomLevel = scale;
elemData[elemId].panX = offsetX;
elemData[elemId].panY = offsetY;
fullScreenMode = true;
toggleOverlap("on");
}
// Handle keydown events
function handleKeyDown(event) {
// Disable key locks to make pasting from the buffer work correctly
if ((event.ctrlKey && event.code === 'KeyV') || (event.ctrlKey && event.code === 'KeyC') || event.code === "F5") {
return;
}
// before activating shortcut, ensure user is not actively typing in an input field
if (!hotkeysConfig.canvas_blur_prompt) {
if (event.target.nodeName === 'TEXTAREA' || event.target.nodeName === 'INPUT') {
return;
}
}
const hotkeyActions = {
[hotkeysConfig.canvas_hotkey_reset]: resetZoom,
[hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
[hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen
};
const action = hotkeyActions[event.code];
if (action) {
event.preventDefault();
action(event);
}
if (
isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) ||
isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust)
) {
event.preventDefault();
}
}
// Get Mouse position
function getMousePosition(e) {
mouseX = e.offsetX;
mouseY = e.offsetY;
}
targetElement.addEventListener("mousemove", getMousePosition);
// Handle events only inside the targetElement
let isKeyDownHandlerAttached = false;
function handleMouseMove() {
if (!isKeyDownHandlerAttached) {
document.addEventListener("keydown", handleKeyDown);
isKeyDownHandlerAttached = true;
activeElement = elemId;
}
}
function handleMouseLeave() {
if (isKeyDownHandlerAttached) {
document.removeEventListener("keydown", handleKeyDown);
isKeyDownHandlerAttached = false;
activeElement = null;
}
}
// Add mouse event handlers
targetElement.addEventListener("mousemove", handleMouseMove);
targetElement.addEventListener("mouseleave", handleMouseLeave);
// Reset zoom when click on another tab
elements.img2imgTabs.addEventListener("click", resetZoom);
elements.img2imgTabs.addEventListener("click", () => {
// targetElement.style.width = "";
if (parseInt(targetElement.style.width) > 865) {
setTimeout(fitToElement, 0);
}
});
targetElement.addEventListener("wheel", e => {
// change zoom level
const operation = e.deltaY > 0 ? "-" : "+";
changeZoomLevel(operation, e);
// Handle brush size adjustment with ctrl key pressed
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) {
e.preventDefault();
// Increase or decrease brush size based on scroll direction
adjustBrushSize(elemId, e.deltaY);
}
});
// Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
function handleMoveKeyDown(e) {
// Disable key locks to make pasting from the buffer work correctly
            if ((e.ctrlKey && e.code === 'KeyV') || (e.ctrlKey && e.code === 'KeyC') || e.code === "F5") {
return;
}
// before activating shortcut, ensure user is not actively typing in an input field
if (!hotkeysConfig.canvas_blur_prompt) {
if (e.target.nodeName === 'TEXTAREA' || e.target.nodeName === 'INPUT') {
return;
}
}
if (e.code === hotkeysConfig.canvas_hotkey_move) {
if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) {
e.preventDefault();
document.activeElement.blur();
isMoving = true;
}
}
}
function handleMoveKeyUp(e) {
if (e.code === hotkeysConfig.canvas_hotkey_move) {
isMoving = false;
}
}
document.addEventListener("keydown", handleMoveKeyDown);
document.addEventListener("keyup", handleMoveKeyUp);
// Detect zoom level and update the pan speed.
function updatePanPosition(movementX, movementY) {
let panSpeed = 2;
if (elemData[elemId].zoomLevel > 8) {
panSpeed = 3.5;
}
elemData[elemId].panX += movementX * panSpeed;
elemData[elemId].panY += movementY * panSpeed;
// Delayed redraw of an element
requestAnimationFrame(() => {
targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${elemData[elemId].zoomLevel})`;
toggleOverlap("on");
});
}
function handleMoveByKey(e) {
if (isMoving && elemId === activeElement) {
updatePanPosition(e.movementX, e.movementY);
targetElement.style.pointerEvents = "none";
} else {
targetElement.style.pointerEvents = "auto";
}
}
// Prevents sticking to the mouse
window.onblur = function() {
isMoving = false;
};
gradioApp().addEventListener("mousemove", handleMoveByKey);
}
applyZoomAndPan(elementIDs.sketch);
applyZoomAndPan(elementIDs.inpaint);
applyZoomAndPan(elementIDs.inpaintSketch);
// Make the function global so that other extensions can take advantage of this solution
window.applyZoomAndPan = applyZoomAndPan;
});
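
The pan adjustment inside updateZoom is what keeps the content point under the cursor stationary while zooming: with transform-origin 0 0, a content point c is drawn on screen at pan + zoom * c, and the cursor offset into the element is mouse = zoom * c. A quick numeric check of that identity (plain Python, invented values):

    zoom, new_zoom, pan_x, mouse_x = 2.0, 3.0, 40.0, 120.0
    content_x = mouse_x / zoom                    # content point currently under the cursor

    before = pan_x + zoom * content_x             # on-screen position before zooming
    pan_x += mouse_x - mouse_x * new_zoom / zoom  # the updateZoom pan formula
    after = pan_x + new_zoom * content_x          # on-screen position after zooming

    assert before == after == 160.0               # the point under the cursor did not move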

View File

@@ -0,0 +1,14 @@
import gradio as gr
from modules import shared
shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
"canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
"canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally, 'Alt' can cause a little trouble in firefox"),
"canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
"canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "),
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"),
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
"canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable function that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
}))

View File

@@ -0,0 +1,63 @@
.canvas-tooltip-info {
position: absolute;
top: 10px;
left: 10px;
cursor: help;
background-color: rgba(0, 0, 0, 0.3);
width: 20px;
height: 20px;
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
flex-direction: column;
z-index: 100;
}
.canvas-tooltip-info::after {
content: '';
display: block;
width: 2px;
height: 7px;
background-color: white;
margin-top: 2px;
}
.canvas-tooltip-info::before {
content: '';
display: block;
width: 2px;
height: 2px;
background-color: white;
}
.canvas-tooltip-content {
display: none;
background-color: #f9f9f9;
color: #333;
border: 1px solid #ddd;
padding: 15px;
position: absolute;
top: 40px;
left: 10px;
width: 250px;
font-size: 16px;
opacity: 0;
border-radius: 8px;
box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
z-index: 100;
}
.canvas-tooltip:hover .canvas-tooltip-content {
display: block;
animation: fadeIn 0.5s;
opacity: 1;
}
@keyframes fadeIn {
from {opacity: 0;}
to {opacity: 1;}
}

View File

@@ -0,0 +1,48 @@
import gradio as gr
from modules import scripts, shared, ui_components, ui_settings
from modules.ui_components import FormColumn
class ExtraOptionsSection(scripts.Script):
section = "extra_options"
def __init__(self):
self.comps = None
self.setting_names = None
def title(self):
return "Extra options"
def show(self, is_img2img):
return scripts.AlwaysVisible
def ui(self, is_img2img):
self.comps = []
self.setting_names = []
with gr.Blocks() as interface:
with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group(), gr.Row():
for setting_name in shared.opts.extra_options:
with FormColumn():
comp = ui_settings.create_setting_component(setting_name)
self.comps.append(comp)
self.setting_names.append(setting_name)
def get_settings_values():
return [ui_settings.get_value_for_setting(key) for key in self.setting_names]
interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False)
return self.comps
def before_process(self, p, *args):
for name, value in zip(self.setting_names, args):
if name not in p.override_settings:
p.override_settings[name] = value
shared.options_templates.update(shared.options_section(('ui', "User interface"), {
"extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_restart(),
"extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion")
}))
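
before_process routes each rendered extra-option value into p.override_settings, so the chosen values apply to that one generation without touching the saved config. A stripped-down sketch of the copy (setting name and value invented):

    class P:                                    # stand-in for the processing object
        def __init__(self):
            self.override_settings = {}

    p = P()
    setting_names, args = ["CLIP_stop_at_last_layers"], [2]

    for name, value in zip(setting_names, args):
        if name not in p.override_settings:     # values already overridden are left alone
            p.override_settings[name] = value

    assert p.override_settings == {"CLIP_stop_at_last_layers": 2}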

View File

@@ -5,7 +5,7 @@
 function checkBrackets(textArea, counterElt) {
     var counts = {};
-    (textArea.value.match(/[(){}\[\]]/g) || []).forEach(bracket => {
+    (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => {
         counts[bracket] = (counts[bracket] || 0) + 1;
     });
     var errors = [];
@@ -27,14 +27,14 @@ function checkBrackets(textArea, counterElt) {

 function setupBracketChecking(id_prompt, id_counter) {
     var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
-    var counter = gradioApp().getElementById(id_counter)
+    var counter = gradioApp().getElementById(id_counter);

     if (textarea && counter) {
         textarea.addEventListener("input", () => checkBrackets(textarea, counter));
     }
 }

-onUiLoaded(function () {
+onUiLoaded(function() {
     setupBracketChecking('txt2img_prompt', 'txt2img_token_counter');
     setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter');
     setupBracketChecking('img2img_prompt', 'img2img_token_counter');
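
checkBrackets boils down to tallying each bracket character and flagging pairs whose open and close counts differ; the same idea in a few lines of Python (prompt invented):

    import re

    prompt = "a ((castle) on a hill"
    counts = {}
    for bracket in re.findall(r"[(){}[\]]", prompt):
        counts[bracket] = counts.get(bracket, 0) + 1

    for opener, closer in [("(", ")"), ("{", "}"), ("[", "]")]:
        if counts.get(opener, 0) != counts.get(closer, 0):
            print(f"mismatched {opener}{closer}")  # -> mismatched ()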

View File

@@ -1,15 +1,14 @@
-<div class='card' style={style} onclick={card_clicked}>
+<div class='card' style={style} onclick={card_clicked} {sort_keys}>
+    {background_image}
     {metadata_button}
     <div class='actions'>
         <div class='additional'>
             <ul>
                 <a href="#" title="replace preview image with currently selected in gallery" onclick={save_card_preview}>replace preview</a>
             </ul>
-            <span style="display:none" class='search_term{serach_only}'>{search_term}</span>
+            <span style="display:none" class='search_term{search_only}'>{search_term}</span>
         </div>
         <span class='name'>{name}</span>
         <span class='description'>{description}</span>
     </div>
 </div>

View File

@@ -1,10 +1,12 @@
 <div>
-        <a href="/docs">API</a>
+        <a href="{api_docs}">API</a>
         •
         <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">Github</a>
         •
         <a href="https://gradio.app">Gradio</a>
         •
+        <a href="#" onclick="showProfile('./internal/profile-startup'); return false;">Startup profile</a>
+        •
         <a href="/" onclick="javascript:gradioApp().getElementById('settings_restart_gradio').click(); return false">Reload UI</a>
 </div>
 <br />

View File

@@ -662,3 +662,29 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 </pre>
+
+<h2><a href="https://github.com/madebyollin/taesd/blob/main/LICENSE">TAESD</a></h2>
+<small>Tiny AutoEncoder for Stable Diffusion option for live previews</small>
+<pre>
+MIT License
+
+Copyright (c) 2023 Ollin Boer Bohan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+</pre>

View File

@@ -1,78 +1,78 @@
 let currentWidth = null;
 let currentHeight = null;
-let arFrameTimeout = setTimeout(function(){},0);
+let arFrameTimeout = setTimeout(function() {}, 0);
-function dimensionChange(e, is_width, is_height){
+function dimensionChange(e, is_width, is_height) {
-    if(is_width){
+    if (is_width) {
-        currentWidth = e.target.value*1.0
+        currentWidth = e.target.value * 1.0;
     }
-    if(is_height){
+    if (is_height) {
-        currentHeight = e.target.value*1.0
+        currentHeight = e.target.value * 1.0;
     }
     var inImg2img = gradioApp().querySelector("#tab_img2img").style.display == "block";
-    if(!inImg2img){
+    if (!inImg2img) {
         return;
     }
     var targetElement = null;
-    var tabIndex = get_tab_index('mode_img2img')
+    var tabIndex = get_tab_index('mode_img2img');
-    if(tabIndex == 0){ // img2img
+    if (tabIndex == 0) { // img2img
         targetElement = gradioApp().querySelector('#img2img_image div[data-testid=image] img');
-    } else if(tabIndex == 1){ //Sketch
+    } else if (tabIndex == 1) { //Sketch
         targetElement = gradioApp().querySelector('#img2img_sketch div[data-testid=image] img');
-    } else if(tabIndex == 2){ // Inpaint
+    } else if (tabIndex == 2) { // Inpaint
         targetElement = gradioApp().querySelector('#img2maskimg div[data-testid=image] img');
-    } else if(tabIndex == 3){ // Inpaint sketch
+    } else if (tabIndex == 3) { // Inpaint sketch
         targetElement = gradioApp().querySelector('#inpaint_sketch div[data-testid=image] img');
     }
-    if(targetElement){
+    if (targetElement) {
         var arPreviewRect = gradioApp().querySelector('#imageARPreview');
-        if(!arPreviewRect){
+        if (!arPreviewRect) {
-            arPreviewRect = document.createElement('div')
+            arPreviewRect = document.createElement('div');
             arPreviewRect.id = "imageARPreview";
-            gradioApp().appendChild(arPreviewRect)
+            gradioApp().appendChild(arPreviewRect);
         }
         var viewportOffset = targetElement.getBoundingClientRect();
-        var viewportscale = Math.min( targetElement.clientWidth/targetElement.naturalWidth, targetElement.clientHeight/targetElement.naturalHeight )
+        var viewportscale = Math.min(targetElement.clientWidth / targetElement.naturalWidth, targetElement.clientHeight / targetElement.naturalHeight);
-        var scaledx = targetElement.naturalWidth*viewportscale
+        var scaledx = targetElement.naturalWidth * viewportscale;
-        var scaledy = targetElement.naturalHeight*viewportscale
+        var scaledy = targetElement.naturalHeight * viewportscale;
-        var cleintRectTop = (viewportOffset.top+window.scrollY)
+        var cleintRectTop = (viewportOffset.top + window.scrollY);
-        var cleintRectLeft = (viewportOffset.left+window.scrollX)
+        var cleintRectLeft = (viewportOffset.left + window.scrollX);
-        var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight/2)
+        var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight / 2);
-        var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth/2)
+        var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth / 2);
-        var arscale = Math.min( scaledx/currentWidth, scaledy/currentHeight )
+        var arscale = Math.min(scaledx / currentWidth, scaledy / currentHeight);
-        var arscaledx = currentWidth*arscale
+        var arscaledx = currentWidth * arscale;
-        var arscaledy = currentHeight*arscale
+        var arscaledy = currentHeight * arscale;
-        var arRectTop = cleintRectCentreY-(arscaledy/2)
+        var arRectTop = cleintRectCentreY - (arscaledy / 2);
-        var arRectLeft = cleintRectCentreX-(arscaledx/2)
+        var arRectLeft = cleintRectCentreX - (arscaledx / 2);
-        var arRectWidth = arscaledx
+        var arRectWidth = arscaledx;
-        var arRectHeight = arscaledy
+        var arRectHeight = arscaledy;
-        arPreviewRect.style.top = arRectTop+'px';
+        arPreviewRect.style.top = arRectTop + 'px';
-        arPreviewRect.style.left = arRectLeft+'px';
+        arPreviewRect.style.left = arRectLeft + 'px';
-        arPreviewRect.style.width = arRectWidth+'px';
+        arPreviewRect.style.width = arRectWidth + 'px';
-        arPreviewRect.style.height = arRectHeight+'px';
+        arPreviewRect.style.height = arRectHeight + 'px';
         clearTimeout(arFrameTimeout);
-        arFrameTimeout = setTimeout(function(){
+        arFrameTimeout = setTimeout(function() {
             arPreviewRect.style.display = 'none';
-        },2000);
+        }, 2000);
         arPreviewRect.style.display = 'block';
@@ -81,31 +81,33 @@ function dimensionChange(e, is_width, is_height){
 }
-onUiUpdate(function(){
+onAfterUiUpdate(function() {
     var arPreviewRect = gradioApp().querySelector('#imageARPreview');
-    if(arPreviewRect){
+    if (arPreviewRect) {
         arPreviewRect.style.display = 'none';
     }
     var tabImg2img = gradioApp().querySelector("#tab_img2img");
     if (tabImg2img) {
         var inImg2img = tabImg2img.style.display == "block";
-        if(inImg2img){
+        if (inImg2img) {
             let inputs = gradioApp().querySelectorAll('input');
-            inputs.forEach(function(e){
+            inputs.forEach(function(e) {
-                var is_width = e.parentElement.id == "img2img_width"
+                var is_width = e.parentElement.id == "img2img_width";
-                var is_height = e.parentElement.id == "img2img_height"
+                var is_height = e.parentElement.id == "img2img_height";
-                if((is_width || is_height) && !e.classList.contains('scrollwatch')){
+                if ((is_width || is_height) && !e.classList.contains('scrollwatch')) {
-                    e.addEventListener('input', function(e){dimensionChange(e, is_width, is_height)} )
-                    e.classList.add('scrollwatch')
+                    e.addEventListener('input', function(e) {
+                        dimensionChange(e, is_width, is_height);
+                    });
+                    e.classList.add('scrollwatch');
                 }
-                if(is_width){
+                if (is_width) {
-                    currentWidth = e.value*1.0
+                    currentWidth = e.value * 1.0;
                 }
-                if(is_height){
+                if (is_height) {
-                    currentHeight = e.value*1.0
+                    currentHeight = e.value * 1.0;
                 }
-            })
+            });
         }
     }
 });

javascript/contextMenus.js

@@ -1,48 +1,48 @@
-contextMenuInit = function(){
+var contextMenuInit = function() {
-    let eventListenerApplied=false;
+    let eventListenerApplied = false;
     let menuSpecs = new Map();
-    const uid = function(){
+    const uid = function() {
         return Date.now().toString(36) + Math.random().toString(36).substring(2);
-    }
+    };
-    function showContextMenu(event,element,menuEntries){
+    function showContextMenu(event, element, menuEntries) {
         let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
         let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;
-        let oldMenu = gradioApp().querySelector('#context-menu')
+        let oldMenu = gradioApp().querySelector('#context-menu');
-        if(oldMenu){
+        if (oldMenu) {
-            oldMenu.remove()
+            oldMenu.remove();
         }
-        let baseStyle = window.getComputedStyle(uiCurrentTab)
+        let baseStyle = window.getComputedStyle(uiCurrentTab);
-        const contextMenu = document.createElement('nav')
+        const contextMenu = document.createElement('nav');
-        contextMenu.id = "context-menu"
+        contextMenu.id = "context-menu";
-        contextMenu.style.background = baseStyle.background
+        contextMenu.style.background = baseStyle.background;
-        contextMenu.style.color = baseStyle.color
+        contextMenu.style.color = baseStyle.color;
-        contextMenu.style.fontFamily = baseStyle.fontFamily
+        contextMenu.style.fontFamily = baseStyle.fontFamily;
-        contextMenu.style.top = posy+'px'
+        contextMenu.style.top = posy + 'px';
-        contextMenu.style.left = posx+'px'
+        contextMenu.style.left = posx + 'px';
-        const contextMenuList = document.createElement('ul')
+        const contextMenuList = document.createElement('ul');
         contextMenuList.className = 'context-menu-items';
         contextMenu.append(contextMenuList);
-        menuEntries.forEach(function(entry){
+        menuEntries.forEach(function(entry) {
-            let contextMenuEntry = document.createElement('a')
+            let contextMenuEntry = document.createElement('a');
-            contextMenuEntry.innerHTML = entry['name']
+            contextMenuEntry.innerHTML = entry['name'];
             contextMenuEntry.addEventListener("click", function() {
                 entry['func']();
-            })
+            });
             contextMenuList.append(contextMenuEntry);
-        })
+        });
-        gradioApp().appendChild(contextMenu)
+        gradioApp().appendChild(contextMenu);
         let menuWidth = contextMenu.offsetWidth + 4;
         let menuHeight = contextMenu.offsetHeight + 4;
@@ -50,117 +50,127 @@ contextMenuInit = function(){
         let windowWidth = window.innerWidth;
         let windowHeight = window.innerHeight;
-        if ( (windowWidth - posx) < menuWidth ) {
+        if ((windowWidth - posx) < menuWidth) {
             contextMenu.style.left = windowWidth - menuWidth + "px";
         }
-        if ( (windowHeight - posy) < menuHeight ) {
+        if ((windowHeight - posy) < menuHeight) {
             contextMenu.style.top = windowHeight - menuHeight + "px";
         }
     }
-    function appendContextMenuOption(targetElementSelector,entryName,entryFunction){
+    function appendContextMenuOption(targetElementSelector, entryName, entryFunction) {
-        var currentItems = menuSpecs.get(targetElementSelector)
+        var currentItems = menuSpecs.get(targetElementSelector);
-        if(!currentItems){
+        if (!currentItems) {
-            currentItems = []
+            currentItems = [];
-            menuSpecs.set(targetElementSelector,currentItems);
+            menuSpecs.set(targetElementSelector, currentItems);
         }
-        let newItem = {'id':targetElementSelector+'_'+uid(),
-            'name':entryName,
-            'func':entryFunction,
-            'isNew':true}
+        let newItem = {
+            id: targetElementSelector + '_' + uid(),
+            name: entryName,
+            func: entryFunction,
+            isNew: true
+        };
-        currentItems.push(newItem)
+        currentItems.push(newItem);
-        return newItem['id']
+        return newItem['id'];
     }
-    function removeContextMenuOption(uid){
+    function removeContextMenuOption(uid) {
         menuSpecs.forEach(function(v) {
-            let index = -1
+            let index = -1;
-            v.forEach(function(e,ei){if(e['id']==uid){index=ei}})
-            if(index>=0){
+            v.forEach(function(e, ei) {
+                if (e['id'] == uid) {
+                    index = ei;
+                }
+            });
+            if (index >= 0) {
                 v.splice(index, 1);
             }
-        })
+        });
     }
-    function addContextMenuEventListener(){
+    function addContextMenuEventListener() {
-        if(eventListenerApplied){
+        if (eventListenerApplied) {
             return;
         }
         gradioApp().addEventListener("click", function(e) {
-            if(! e.isTrusted){
+            if (!e.isTrusted) {
-                return
+                return;
             }
-            let oldMenu = gradioApp().querySelector('#context-menu')
+            let oldMenu = gradioApp().querySelector('#context-menu');
-            if(oldMenu){
+            if (oldMenu) {
-                oldMenu.remove()
+                oldMenu.remove();
             }
         });
         gradioApp().addEventListener("contextmenu", function(e) {
-            let oldMenu = gradioApp().querySelector('#context-menu')
+            let oldMenu = gradioApp().querySelector('#context-menu');
-            if(oldMenu){
+            if (oldMenu) {
-                oldMenu.remove()
+                oldMenu.remove();
             }
-            menuSpecs.forEach(function(v,k) {
+            menuSpecs.forEach(function(v, k) {
-                if(e.composedPath()[0].matches(k)){
+                if (e.composedPath()[0].matches(k)) {
-                    showContextMenu(e,e.composedPath()[0],v)
+                    showContextMenu(e, e.composedPath()[0], v);
-                    e.preventDefault()
+                    e.preventDefault();
                 }
-            })
+            });
         });
-        eventListenerApplied=true
+        eventListenerApplied = true;
     }
-    return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener]
+    return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener];
-}
+};
-initResponse = contextMenuInit();
+var initResponse = contextMenuInit();
-appendContextMenuOption = initResponse[0];
+var appendContextMenuOption = initResponse[0];
-removeContextMenuOption = initResponse[1];
+var removeContextMenuOption = initResponse[1];
-addContextMenuEventListener = initResponse[2];
+var addContextMenuEventListener = initResponse[2];
-(function(){
+(function() {
     //Start example Context Menu Items
-    let generateOnRepeat = function(genbuttonid,interruptbuttonid){
+    let generateOnRepeat = function(genbuttonid, interruptbuttonid) {
         let genbutton = gradioApp().querySelector(genbuttonid);
         let interruptbutton = gradioApp().querySelector(interruptbuttonid);
-        if(!interruptbutton.offsetParent){
+        if (!interruptbutton.offsetParent) {
             genbutton.click();
         }
-        clearInterval(window.generateOnRepeatInterval)
+        clearInterval(window.generateOnRepeatInterval);
-        window.generateOnRepeatInterval = setInterval(function(){
+        window.generateOnRepeatInterval = setInterval(function() {
-            if(!interruptbutton.offsetParent){
+            if (!interruptbutton.offsetParent) {
                 genbutton.click();
             }
         },
-        500)
+        500);
-    }
+    };
-    appendContextMenuOption('#txt2img_generate','Generate forever',function(){
-        generateOnRepeat('#txt2img_generate','#txt2img_interrupt');
-    })
-    appendContextMenuOption('#img2img_generate','Generate forever',function(){
-        generateOnRepeat('#img2img_generate','#img2img_interrupt');
-    })
+    let generateOnRepeat_txt2img = function() {
+        generateOnRepeat('#txt2img_generate', '#txt2img_interrupt');
+    };
+    let generateOnRepeat_img2img = function() {
+        generateOnRepeat('#img2img_generate', '#img2img_interrupt');
+    };
+    appendContextMenuOption('#txt2img_generate', 'Generate forever', generateOnRepeat_txt2img);
+    appendContextMenuOption('#txt2img_interrupt', 'Generate forever', generateOnRepeat_txt2img);
+    appendContextMenuOption('#img2img_generate', 'Generate forever', generateOnRepeat_img2img);
+    appendContextMenuOption('#img2img_interrupt', 'Generate forever', generateOnRepeat_img2img);
-    let cancelGenerateForever = function(){
-        clearInterval(window.generateOnRepeatInterval)
-    }
+    let cancelGenerateForever = function() {
+        clearInterval(window.generateOnRepeatInterval);
+    };
-    appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever)
-    appendContextMenuOption('#txt2img_generate', 'Cancel generate forever',cancelGenerateForever)
-    appendContextMenuOption('#img2img_interrupt','Cancel generate forever',cancelGenerateForever)
-    appendContextMenuOption('#img2img_generate', 'Cancel generate forever',cancelGenerateForever)
+    appendContextMenuOption('#txt2img_interrupt', 'Cancel generate forever', cancelGenerateForever);
+    appendContextMenuOption('#txt2img_generate', 'Cancel generate forever', cancelGenerateForever);
+    appendContextMenuOption('#img2img_interrupt', 'Cancel generate forever', cancelGenerateForever);
+    appendContextMenuOption('#img2img_generate', 'Cancel generate forever', cancelGenerateForever);
 })();
 //End example Context Menu Items
-onUiUpdate(function(){
-    addContextMenuEventListener()
-});
+onAfterUiUpdate(addContextMenuEventListener);

javascript/dragdrop.js

@@ -1,11 +1,11 @@
 // allows drag-dropping files into gradio image elements, and also pasting images from clipboard
-function isValidImageList( files ) {
+function isValidImageList(files) {
     return files && files?.length === 1 && ['image/png', 'image/gif', 'image/jpeg'].includes(files[0].type);
 }
-function dropReplaceImage( imgWrap, files ) {
+function dropReplaceImage(imgWrap, files) {
-    if ( ! isValidImageList( files ) ) {
+    if (!isValidImageList(files)) {
         return;
     }
@@ -14,8 +14,8 @@ function dropReplaceImage( imgWrap, files ) {
     imgWrap.querySelector('.modify-upload button + button, .touch-none + div button + button')?.click();
     const callback = () => {
         const fileInput = imgWrap.querySelector('input[type="file"]');
-        if ( fileInput ) {
+        if (fileInput) {
-            if ( files.length === 0 ) {
+            if (files.length === 0) {
                 files = new DataTransfer();
                 files.items.add(tmpFile);
                 fileInput.files = files.files;
@@ -26,34 +26,49 @@ function dropReplaceImage( imgWrap, files ) {
         }
     };
-    if ( imgWrap.closest('#pnginfo_image') ) {
+    if (imgWrap.closest('#pnginfo_image')) {
         // special treatment for PNG Info tab, wait for fetch request to finish
         const oldFetch = window.fetch;
-        window.fetch = async (input, options) => {
+        window.fetch = async(input, options) => {
             const response = await oldFetch(input, options);
-            if ( 'api/predict/' === input ) {
+            if ('api/predict/' === input) {
                 const content = await response.text();
                 window.fetch = oldFetch;
-                window.requestAnimationFrame( () => callback() );
+                window.requestAnimationFrame(() => callback());
                 return new Response(content, {
                     status: response.status,
                     statusText: response.statusText,
                     headers: response.headers
-                })
+                });
             }
             return response;
         };
     } else {
-        window.requestAnimationFrame( () => callback() );
+        window.requestAnimationFrame(() => callback());
     }
 }
+function eventHasFiles(e) {
+    if (!e.dataTransfer || !e.dataTransfer.files) return false;
+    if (e.dataTransfer.files.length > 0) return true;
+    if (e.dataTransfer.items.length > 0 && e.dataTransfer.items[0].kind == "file") return true;
+    return false;
+}
+function dragDropTargetIsPrompt(target) {
+    if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true;
+    if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true;
+    return false;
+}
 window.document.addEventListener('dragover', e => {
     const target = e.composedPath()[0];
-    const imgWrap = target.closest('[data-testid="image"]');
-    if ( !imgWrap && target.placeholder && target.placeholder.indexOf("Prompt") == -1) {
-        return;
-    }
+    if (!eventHasFiles(e)) return;
+    var targetImage = target.closest('[data-testid="image"]');
+    if (!dragDropTargetIsPrompt(target) && !targetImage) return;
     e.stopPropagation();
     e.preventDefault();
     e.dataTransfer.dropEffect = 'copy';
@@ -61,28 +76,45 @@ window.document.addEventListener('dragover', e => {
 window.document.addEventListener('drop', e => {
     const target = e.composedPath()[0];
-    if (target.placeholder.indexOf("Prompt") == -1) {
-        return;
-    }
-    const imgWrap = target.closest('[data-testid="image"]');
-    if ( !imgWrap ) {
-        return;
-    }
-    e.stopPropagation();
-    e.preventDefault();
-    const files = e.dataTransfer.files;
-    dropReplaceImage( imgWrap, files );
+    if (!eventHasFiles(e)) return;
+    if (dragDropTargetIsPrompt(target)) {
+        e.stopPropagation();
+        e.preventDefault();
+        let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
+        const imgParent = gradioApp().getElementById(prompt_target);
+        const files = e.dataTransfer.files;
+        const fileInput = imgParent.querySelector('input[type="file"]');
+        if (fileInput) {
+            fileInput.files = files;
+            fileInput.dispatchEvent(new Event('change'));
+        }
+    }
+    var targetImage = target.closest('[data-testid="image"]');
+    if (targetImage) {
+        e.stopPropagation();
+        e.preventDefault();
+        const files = e.dataTransfer.files;
+        dropReplaceImage(targetImage, files);
+        return;
+    }
 });
 window.addEventListener('paste', e => {
     const files = e.clipboardData.files;
-    if ( ! isValidImageList( files ) ) {
+    if (!isValidImageList(files)) {
         return;
     }
     const visibleImageFields = [...gradioApp().querySelectorAll('[data-testid="image"]')]
-        .filter(el => uiElementIsVisible(el));
+        .filter(el => uiElementIsVisible(el))
+        .sort((a, b) => uiElementInSight(b) - uiElementInSight(a));
-    if ( ! visibleImageFields.length ) {
+    if (!visibleImageFields.length) {
         return;
     }
@@ -93,5 +125,6 @@ window.addEventListener('paste', e => {
         firstFreeImageField ?
             firstFreeImageField :
             visibleImageFields[visibleImageFields.length - 1]
-        , files );
+        , files
+    );
 });

javascript/edit-attention.js

@@ -1,17 +1,17 @@
-function keyupEditAttention(event){
+function keyupEditAttention(event) {
     let target = event.originalTarget || event.composedPath()[0];
-    if (! target.matches("[id*='_toprow'] [id*='_prompt'] textarea")) return;
+    if (!target.matches("*:is([id*='_toprow'] [id*='_prompt'], .prompt) textarea")) return;
-    if (! (event.metaKey || event.ctrlKey)) return;
+    if (!(event.metaKey || event.ctrlKey)) return;
-    let isPlus = event.key == "ArrowUp"
+    let isPlus = event.key == "ArrowUp";
-    let isMinus = event.key == "ArrowDown"
+    let isMinus = event.key == "ArrowDown";
     if (!isPlus && !isMinus) return;
     let selectionStart = target.selectionStart;
     let selectionEnd = target.selectionEnd;
     let text = target.value;
-    function selectCurrentParenthesisBlock(OPEN, CLOSE){
+    function selectCurrentParenthesisBlock(OPEN, CLOSE) {
         if (selectionStart !== selectionEnd) return false;
         // Find opening parenthesis around current cursor
@@ -44,7 +44,7 @@ function keyupEditAttention(event){
         return true;
     }
-    function selectCurrentWord(){
+    function selectCurrentWord() {
         if (selectionStart !== selectionEnd) return false;
         const delimiters = opts.keyedit_delimiters + " \r\n\t";
@@ -69,20 +69,20 @@ function keyupEditAttention(event){
     event.preventDefault();
-    var closeCharacter = ')'
+    var closeCharacter = ')';
-    var delta = opts.keyedit_precision_attention
+    var delta = opts.keyedit_precision_attention;
-    if (selectionStart > 0 && text[selectionStart - 1] == '<'){
+    if (selectionStart > 0 && text[selectionStart - 1] == '<') {
-        closeCharacter = '>'
+        closeCharacter = '>';
-        delta = opts.keyedit_precision_extra
+        delta = opts.keyedit_precision_extra;
     } else if (selectionStart == 0 || text[selectionStart - 1] != "(") {
         // do not include spaces at the end
-        while(selectionEnd > selectionStart && text[selectionEnd-1] == ' '){
+        while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') {
             selectionEnd -= 1;
         }
-        if(selectionStart == selectionEnd){
+        if (selectionStart == selectionEnd) {
-            return
+            return;
         }
         text = text.slice(0, selectionStart) + "(" + text.slice(selectionStart, selectionEnd) + ":1.0)" + text.slice(selectionEnd);
@@ -97,14 +97,15 @@ function keyupEditAttention(event){
     weight += isPlus ? delta : -delta;
     weight = parseFloat(weight.toPrecision(12));
-    if(String(weight).length == 1) weight += ".0"
+    if (String(weight).length == 1) weight += ".0";
     if (closeCharacter == ')' && weight == 1) {
-        text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + 5);
+        var endParenPos = text.substring(selectionEnd).indexOf(')');
+        text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + endParenPos + 1);
         selectionStart--;
         selectionEnd--;
     } else {
-        text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + 1 + end - 1);
+        text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + end);
     }
     target.focus();
@@ -112,7 +113,7 @@ function keyupEditAttention(event){
     target.selectionStart = selectionStart;
     target.selectionEnd = selectionEnd;
-    updateInput(target)
+    updateInput(target);
 }
 addEventListener('keydown', (event) => {

41
javascript/edit-order.js Normal file

@@ -0,0 +1,41 @@
/* alt+left/right moves text in prompt */

function keyupEditOrder(event) {
    if (!opts.keyedit_move) return;

    let target = event.originalTarget || event.composedPath()[0];
    if (!target.matches("*:is([id*='_toprow'] [id*='_prompt'], .prompt) textarea")) return;
    if (!event.altKey) return;
    event.preventDefault();

    let isLeft = event.key == "ArrowLeft";
    let isRight = event.key == "ArrowRight";
    if (!isLeft && !isRight) return;

    let selectionStart = target.selectionStart;
    let selectionEnd = target.selectionEnd;
    let text = target.value;
    let items = text.split(",");
    let indexStart = (text.slice(0, selectionStart).match(/,/g) || []).length;
    let indexEnd = (text.slice(0, selectionEnd).match(/,/g) || []).length;
    let range = indexEnd - indexStart + 1;

    if (isLeft && indexStart > 0) {
        items.splice(indexStart - 1, 0, ...items.splice(indexStart, range));
        target.value = items.join();
        target.selectionStart = items.slice(0, indexStart - 1).join().length + (indexStart == 1 ? 0 : 1);
        target.selectionEnd = items.slice(0, indexEnd).join().length;
    } else if (isRight && indexEnd < items.length - 1) {
        items.splice(indexStart + 1, 0, ...items.splice(indexStart, range));
        target.value = items.join();
        target.selectionStart = items.slice(0, indexStart + 1).join().length + 1;
        target.selectionEnd = items.slice(0, indexEnd + 2).join().length;
    }

    event.preventDefault();
    updateInput(target);
}

addEventListener('keydown', (event) => {
    keyupEditOrder(event);
});

javascript/extensions.js

@@ -1,51 +1,54 @@
-function extensions_apply(_disabled_list, _update_list, disable_all){
+function extensions_apply(_disabled_list, _update_list, disable_all) {
-    var disable = []
+    var disable = [];
-    var update = []
+    var update = [];
-    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
+    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) {
-        if(x.name.startsWith("enable_") && ! x.checked)
-            disable.push(x.name.substring(7))
+        if (x.name.startsWith("enable_") && !x.checked) {
+            disable.push(x.name.substring(7));
+        }
-        if(x.name.startsWith("update_") && x.checked)
-            update.push(x.name.substring(7))
-    })
+        if (x.name.startsWith("update_") && x.checked) {
+            update.push(x.name.substring(7));
+        }
+    });
-    restart_reload()
+    restart_reload();
-    return [JSON.stringify(disable), JSON.stringify(update), disable_all]
+    return [JSON.stringify(disable), JSON.stringify(update), disable_all];
 }
-function extensions_check(){
+function extensions_check() {
-    var disable = []
+    var disable = [];
-    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
+    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) {
-        if(x.name.startsWith("enable_") && ! x.checked)
-            disable.push(x.name.substring(7))
-    })
+        if (x.name.startsWith("enable_") && !x.checked) {
+            disable.push(x.name.substring(7));
+        }
+    });
-    gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
+    gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) {
-        x.innerHTML = "Loading..."
+        x.innerHTML = "Loading...";
-    })
+    });
-    var id = randomId()
+    var id = randomId();
-    requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function(){
+    requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function() {
-    })
+    });
-    return [id, JSON.stringify(disable)]
+    return [id, JSON.stringify(disable)];
 }
-function install_extension_from_index(button, url){
+function install_extension_from_index(button, url) {
-    button.disabled = "disabled"
+    button.disabled = "disabled";
-    button.value = "Installing..."
+    button.value = "Installing...";
-    var textarea = gradioApp().querySelector('#extension_to_install textarea')
+    var textarea = gradioApp().querySelector('#extension_to_install textarea');
-    textarea.value = url
+    textarea.value = url;
-    updateInput(textarea)
+    updateInput(textarea);
-    gradioApp().querySelector('#install_extension_button').click()
+    gradioApp().querySelector('#install_extension_button').click();
 }
 function config_state_confirm_restore(_, config_state_name, config_restore_type) {
@@ -63,9 +66,27 @@ function config_state_confirm_restore(_, config_state_name, config_restore_type)
     let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + ".");
     if (confirmed) {
         restart_reload();
-        gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
+        gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) {
-            x.innerHTML = "Loading..."
+            x.innerHTML = "Loading...";
-        })
+        });
     }
     return [confirmed, config_state_name, config_restore_type];
 }
+function toggle_all_extensions(event) {
+    gradioApp().querySelectorAll('#extensions .extension_toggle').forEach(function(checkbox_el) {
+        checkbox_el.checked = event.target.checked;
+    });
+}
+function toggle_extension() {
+    let all_extensions_toggled = true;
+    for (const checkbox_el of gradioApp().querySelectorAll('#extensions .extension_toggle')) {
+        if (!checkbox_el.checked) {
+            all_extensions_toggled = false;
+            break;
+        }
+    }
+    gradioApp().querySelector('#extensions .all_extensions_toggle').checked = all_extensions_toggled;
+}

javascript/extraNetworks.js

@@ -1,144 +1,209 @@
-function setupExtraNetworksForTab(tabname){
+function setupExtraNetworksForTab(tabname) {
-    gradioApp().querySelector('#'+tabname+'_extra_tabs').classList.add('extra-networks')
+    gradioApp().querySelector('#' + tabname + '_extra_tabs').classList.add('extra-networks');
-    var tabs = gradioApp().querySelector('#'+tabname+'_extra_tabs > div')
+    var tabs = gradioApp().querySelector('#' + tabname + '_extra_tabs > div');
-    var search = gradioApp().querySelector('#'+tabname+'_extra_search textarea')
+    var search = gradioApp().querySelector('#' + tabname + '_extra_search textarea');
-    var refresh = gradioApp().getElementById(tabname+'_extra_refresh')
+    var sort = gradioApp().getElementById(tabname + '_extra_sort');
+    var sortOrder = gradioApp().getElementById(tabname + '_extra_sortorder');
+    var refresh = gradioApp().getElementById(tabname + '_extra_refresh');
-    search.classList.add('search')
+    search.classList.add('search');
-    tabs.appendChild(search)
-    tabs.appendChild(refresh)
+    sort.classList.add('sort');
+    sortOrder.classList.add('sortorder');
+    sort.dataset.sortkey = 'sortDefault';
+    tabs.appendChild(search);
+    tabs.appendChild(sort);
+    tabs.appendChild(sortOrder);
+    tabs.appendChild(refresh);
-    var applyFilter = function(){
+    var applyFilter = function() {
-        var searchTerm = search.value.toLowerCase()
+        var searchTerm = search.value.toLowerCase();
-        gradioApp().querySelectorAll('#'+tabname+'_extra_tabs div.card').forEach(function(elem){
+        gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card').forEach(function(elem) {
-            var searchOnly = elem.querySelector('.search_only')
+            var searchOnly = elem.querySelector('.search_only');
-            var text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase()
+            var text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase();
-            var visible = text.indexOf(searchTerm) != -1
+            var visible = text.indexOf(searchTerm) != -1;
-            if(searchOnly && searchTerm.length < 4){
+            if (searchOnly && searchTerm.length < 4) {
-                visible = false
+                visible = false;
             }
-            elem.style.display = visible ? "" : "none"
+            elem.style.display = visible ? "" : "none";
-        })
-    }
+        });
+    };
+    var applySort = function() {
+        var reverse = sortOrder.classList.contains("sortReverse");
+        var sortKey = sort.querySelector("input").value.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim();
+        sortKey = sortKey ? "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1) : "";
+        var sortKeyStore = sortKey ? sortKey + (reverse ? "Reverse" : "") : "";
+        if (!sortKey || sortKeyStore == sort.dataset.sortkey) {
+            return;
+        }
+        sort.dataset.sortkey = sortKeyStore;
+        var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card');
+        cards.forEach(function(card) {
+            card.originalParentElement = card.parentElement;
+        });
+        var sortedCards = Array.from(cards);
+        sortedCards.sort(function(cardA, cardB) {
+            var a = cardA.dataset[sortKey];
+            var b = cardB.dataset[sortKey];
+            if (!isNaN(a) && !isNaN(b)) {
+                return parseInt(a) - parseInt(b);
+            }
+            return (a < b ? -1 : (a > b ? 1 : 0));
+        });
+        if (reverse) {
+            sortedCards.reverse();
+        }
+        cards.forEach(function(card) {
+            card.remove();
+        });
+        sortedCards.forEach(function(card) {
+            card.originalParentElement.appendChild(card);
+        });
+    };
     search.addEventListener("input", applyFilter);
     applyFilter();
+    ["change", "blur", "click"].forEach(function(evt) {
+        sort.querySelector("input").addEventListener(evt, applySort);
+    });
+    sortOrder.addEventListener("click", function() {
+        sortOrder.classList.toggle("sortReverse");
+        applySort();
+    });
     extraNetworksApplyFilter[tabname] = applyFilter;
 }
-function applyExtraNetworkFilter(tabname){
+function applyExtraNetworkFilter(tabname) {
     setTimeout(extraNetworksApplyFilter[tabname], 1);
 }
-var extraNetworksApplyFilter = {}
+var extraNetworksApplyFilter = {};
 var activePromptTextarea = {};
-function setupExtraNetworks(){
+function setupExtraNetworks() {
-    setupExtraNetworksForTab('txt2img')
+    setupExtraNetworksForTab('txt2img');
-    setupExtraNetworksForTab('img2img')
+    setupExtraNetworksForTab('img2img');
-    function registerPrompt(tabname, id){
+    function registerPrompt(tabname, id) {
         var textarea = gradioApp().querySelector("#" + id + " > label > textarea");
-        if (! activePromptTextarea[tabname]){
+        if (!activePromptTextarea[tabname]) {
-            activePromptTextarea[tabname] = textarea
+            activePromptTextarea[tabname] = textarea;
         }
-        textarea.addEventListener("focus", function(){
+        textarea.addEventListener("focus", function() {
             activePromptTextarea[tabname] = textarea;
         });
     }
-    registerPrompt('txt2img', 'txt2img_prompt')
+    registerPrompt('txt2img', 'txt2img_prompt');
-    registerPrompt('txt2img', 'txt2img_neg_prompt')
+    registerPrompt('txt2img', 'txt2img_neg_prompt');
-    registerPrompt('img2img', 'img2img_prompt')
+    registerPrompt('img2img', 'img2img_prompt');
-    registerPrompt('img2img', 'img2img_neg_prompt')
+    registerPrompt('img2img', 'img2img_neg_prompt');
 }
-onUiLoaded(setupExtraNetworks)
+onUiLoaded(setupExtraNetworks);
-var re_extranet = /<([^:]+:[^:]+):[\d\.]+>/;
+var re_extranet = /<([^:]+:[^:]+):[\d.]+>/;
-var re_extranet_g = /\s+<([^:]+:[^:]+):[\d\.]+>/g;
+var re_extranet_g = /\s+<([^:]+:[^:]+):[\d.]+>/g;
-function tryToRemoveExtraNetworkFromPrompt(textarea, text){
+function tryToRemoveExtraNetworkFromPrompt(textarea, text) {
-    var m = text.match(re_extranet)
+    var m = text.match(re_extranet);
-    if(! m) return false
-    var partToSearch = m[1]
-    var replaced = false
-    var newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found){
+    var replaced = false;
+    var newTextareaText;
+    if (m) {
+        var partToSearch = m[1];
+        newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found) {
             m = found.match(re_extranet);
-            if(m[1] == partToSearch){
+            if (m[1] == partToSearch) {
                 replaced = true;
-                return ""
+                return "";
             }
             return found;
-    })
+        });
+    } else {
+        newTextareaText = textarea.value.replaceAll(new RegExp(text, "g"), function(found) {
+            if (found == text) {
+                replaced = true;
+                return "";
+            }
+            return found;
+        });
+    }
-    if(replaced){
+    if (replaced) {
-        textarea.value = newTextareaText
+        textarea.value = newTextareaText;
         return true;
     }
-    return false
+    return false;
 }
-function cardClicked(tabname, textToAdd, allowNegativePrompt){
+function cardClicked(tabname, textToAdd, allowNegativePrompt) {
-    var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea")
+    var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea");
-    if(! tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)){
+    if (!tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)) {
-        textarea.value = textarea.value + opts.extra_networks_add_text_separator + textToAdd
+        textarea.value = textarea.value + opts.extra_networks_add_text_separator + textToAdd;
     }
-    updateInput(textarea)
+    updateInput(textarea);
 }
-function saveCardPreview(event, tabname, filename){
+function saveCardPreview(event, tabname, filename) {
-    var textarea = gradioApp().querySelector("#" + tabname + '_preview_filename > label > textarea')
+    var textarea = gradioApp().querySelector("#" + tabname + '_preview_filename > label > textarea');
-    var button = gradioApp().getElementById(tabname + '_save_preview')
+    var button = gradioApp().getElementById(tabname + '_save_preview');
-    textarea.value = filename
+    textarea.value = filename;
-    updateInput(textarea)
+    updateInput(textarea);
-    button.click()
+    button.click();
-    event.stopPropagation()
+    event.stopPropagation();
-    event.preventDefault()
+    event.preventDefault();
 }
-function extraNetworksSearchButton(tabs_id, event){
+function extraNetworksSearchButton(tabs_id, event) {
-    var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea')
+    var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea');
-    var button = event.target
+    var button = event.target;
-    var text = button.classList.contains("search-all") ? "" : button.textContent.trim()
+    var text = button.classList.contains("search-all") ? "" : button.textContent.trim();
-    searchTextarea.value = text
+    searchTextarea.value = text;
-    updateInput(searchTextarea)
+    updateInput(searchTextarea);
 }
 var globalPopup = null;
 var globalPopupInner = null;
-function popup(contents){
+function popup(contents) {
-    if(! globalPopup){
+    if (!globalPopup) {
-        globalPopup = document.createElement('div')
+        globalPopup = document.createElement('div');
-        globalPopup.onclick = function(){ globalPopup.style.display = "none"; };
+        globalPopup.onclick = function() {
+            globalPopup.style.display = "none";
+        };
         globalPopup.classList.add('global-popup');
-        var close = document.createElement('div')
+        var close = document.createElement('div');
         close.classList.add('global-popup-close');
-        close.onclick = function(){ globalPopup.style.display = "none"; };
+        close.onclick = function() {
+            globalPopup.style.display = "none";
+        };
         close.title = "Close";
-        globalPopup.appendChild(close)
+        globalPopup.appendChild(close);
-        globalPopupInner = document.createElement('div')
+        globalPopupInner = document.createElement('div');
-        globalPopupInner.onclick = function(event){ event.stopPropagation(); return false; };
+        globalPopupInner.onclick = function(event) {
+            event.stopPropagation(); return false;
+        };
         globalPopupInner.classList.add('global-popup-inner');
-        globalPopup.appendChild(globalPopupInner)
+        globalPopup.appendChild(globalPopupInner);
         gradioApp().appendChild(globalPopup);
     }
@@ -149,31 +214,33 @@ function popup(contents){
     globalPopup.style.display = "flex";
 }
-function extraNetworksShowMetadata(text){
+function extraNetworksShowMetadata(text) {
-    var elem = document.createElement('pre')
+    var elem = document.createElement('pre');
     elem.classList.add('popup-metadata');
     elem.textContent = text;
     popup(elem);
 }
-function requestGet(url, data, handler, errorHandler){
+function requestGet(url, data, handler, errorHandler) {
     var xhr = new XMLHttpRequest();
-    var args = Object.keys(data).map(function(k){ return encodeURIComponent(k) + '=' + encodeURIComponent(data[k]) }).join('&')
+    var args = Object.keys(data).map(function(k) {
+        return encodeURIComponent(k) + '=' + encodeURIComponent(data[k]);
+    }).join('&');
     xhr.open("GET", url + "?" + args, true);
-    xhr.onreadystatechange = function () {
+    xhr.onreadystatechange = function() {
         if (xhr.readyState === 4) {
             if (xhr.status === 200) {
                 try {
                     var js = JSON.parse(xhr.responseText);
-                    handler(js)
+                    handler(js);
                 } catch (error) {
                     console.error(error);
-                    errorHandler()
+                    errorHandler();
                 }
-            } else{
+            } else {
-                errorHandler()
+                errorHandler();
             }
         }
     };
@@ -181,16 +248,18 @@ function requestGet(url, data, handler, errorHandler){
     xhr.send(js);
 }
-function extraNetworksRequestMetadata(event, extraPage, cardName){
+function extraNetworksRequestMetadata(event, extraPage, cardName) {
-    var showError = function(){ extraNetworksShowMetadata("there was an error getting metadata"); }
+    var showError = function() {
+        extraNetworksShowMetadata("there was an error getting metadata");
+    };
-    requestGet("./sd_extra_networks/metadata", {"page": extraPage, "item": cardName}, function(data){
+    requestGet("./sd_extra_networks/metadata", {page: extraPage, item: cardName}, function(data) {
-        if(data && data.metadata){
+        if (data && data.metadata) {
-            extraNetworksShowMetadata(data.metadata)
+            extraNetworksShowMetadata(data.metadata);
-        } else{
+        } else {
-            showError()
+            showError();
         }
-    }, showError)
+    }, showError);
-    event.stopPropagation()
+    event.stopPropagation();
 }

javascript/generationParams.js

@@ -1,33 +1,35 @@
 // attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes
 let txt2img_gallery, img2img_gallery, modal = undefined;
-onUiUpdate(function(){
+onAfterUiUpdate(function() {
     if (!txt2img_gallery) {
-        txt2img_gallery = attachGalleryListeners("txt2img")
+        txt2img_gallery = attachGalleryListeners("txt2img");
     }
     if (!img2img_gallery) {
-        img2img_gallery = attachGalleryListeners("img2img")
+        img2img_gallery = attachGalleryListeners("img2img");
     }
     if (!modal) {
-        modal = gradioApp().getElementById('lightboxModal')
+        modal = gradioApp().getElementById('lightboxModal');
-        modalObserver.observe(modal, { attributes : true, attributeFilter : ['style'] });
+        modalObserver.observe(modal, {attributes: true, attributeFilter: ['style']});
     }
 });
 let modalObserver = new MutationObserver(function(mutations) {
     mutations.forEach(function(mutationRecord) {
-        let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText
+        let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText;
-        if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img'))
-            gradioApp().getElementById(selectedTab+"_generation_info_button")?.click()
+        if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img')) {
+            gradioApp().getElementById(selectedTab + "_generation_info_button")?.click();
+        }
     });
 });
 function attachGalleryListeners(tab_name) {
-    var gallery = gradioApp().querySelector('#'+tab_name+'_gallery')
+    var gallery = gradioApp().querySelector('#' + tab_name + '_gallery');
-    gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name+"_generation_info_button").click());
+    gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name + "_generation_info_button").click());
     gallery?.addEventListener('keydown', (e) => {
-        if (e.keyCode == 37 || e.keyCode == 39) // left or right arrow
-            gradioApp().getElementById(tab_name+"_generation_info_button").click()
+        if (e.keyCode == 37 || e.keyCode == 39) { // left or right arrow
+            gradioApp().getElementById(tab_name + "_generation_info_button").click();
+        }
     });
     return gallery;
 }

javascript/hints.js

@@ -1,6 +1,6 @@
 // mouseover tooltips for various UI elements
-titles = {
+var titles = {
     "Sampling steps": "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results",
     "Sampling method": "Which algorithm to use to produce the image",
     "GFPGAN": "Restore low quality faces using GFPGAN neural network",
@@ -9,12 +9,13 @@ titles = {
     "UniPC": "Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models",
     "DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution",
+    "\u{1F4D0}": "Auto detect size from img2img",
     "Batch count": "How many batches of images to create (has no impact on generation performance or VRAM usage)",
     "Batch size": "How many image to create in a single batch (increases generation performance at cost of higher VRAM usage)",
     "CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results",
     "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result",
     "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time",
-    "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed",
+    "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomized",
     "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
     "\u{1f4c2}": "Open images output directory",
     "\u{1f4be}": "Save style",
@@ -66,8 +67,8 @@ titles = {
     "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",
-    "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [denoising], [clip_skip], [batch_number], [generation_number], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp], [hasprompt<prompt1|default><prompt2>..]; leave empty for default.",
+    "Images filename pattern": "Use tags like [seed] and [date] to define how filenames for images are chosen. Leave empty for default.",
-    "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [denoising], [clip_skip], [batch_number], [generation_number], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp], [hasprompt<prompt1|default><prompt2>..]; leave empty for default.",
+    "Directory name pattern": "Use tags like [seed] and [date] to define how subdirectories for images and grids are chosen. Leave empty for default.",
     "Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",
     "Loopback": "Performs img2img processing multiple times. Output images are used as input for the next loop.",
@@ -111,23 +112,29 @@ titles = {
     "Resize height to": "Resizes image to this height. If 0, height is inferred from either of two nearby sliders.",
     "Multiplier for extra networks": "When adding extra network such as Hypernetwork or Lora to prompt, use this multiplier for it.",
     "Discard weights with matching name": "Regular expression; if weights's name matches it, the weights is not written to the resulting checkpoint. Use ^model_ema to discard EMA weights.",
-    "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order lsited.",
+    "Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order listed.",
     "Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction."
-}
+};
-onUiUpdate(function(){
-    gradioApp().querySelectorAll('span, button, select, p').forEach(function(span){
-        if (span.title) return; // already has a title
-        let tooltip = localization[titles[span.textContent]] || titles[span.textContent];
-        if(!tooltip){
-            tooltip = localization[titles[span.value]] || titles[span.value];
-        }
-        if(!tooltip){
-            for (const c of span.classList) {
+function updateTooltip(element) {
+    if (element.title) return; // already has a title
+    let text = element.textContent;
+    let tooltip = localization[titles[text]] || titles[text];
+    if (!tooltip) {
+        let value = element.value;
+        if (value) tooltip = localization[titles[value]] || titles[value];
+    }
+    if (!tooltip) {
+        // Gradio dropdown options have `data-value`.
+        let dataValue = element.dataset.value;
+        if (dataValue) tooltip = localization[titles[dataValue]] || titles[dataValue];
+    }
+    if (!tooltip) {
+        for (const c of element.classList) {
             if (c in titles) {
                 tooltip = localization[titles[c]] || titles[c];
                 break;
@@ -135,16 +142,54 @@ onUiUpdate(function(){
             }
         }
-        if(tooltip){
-            span.title = tooltip;
-        }
-    })
-    gradioApp().querySelectorAll('select').forEach(function(select){
-        if (select.onchange != null) return;
-        select.onchange = function(){
-            select.title = localization[titles[select.value]] || titles[select.value] || "";
-        }
-    })
-})
+    if (tooltip) {
+        element.title = tooltip;
+    }
+}
+// Nodes to check for adding tooltips.
+const tooltipCheckNodes = new Set();
+// Timer for debouncing tooltip check.
+let tooltipCheckTimer = null;
+function processTooltipCheckNodes() {
+    for (const node of tooltipCheckNodes) {
+        updateTooltip(node);
+    }
+    tooltipCheckNodes.clear();
+}
+onUiUpdate(function(mutationRecords) {
+    for (const record of mutationRecords) {
+        if (record.type === "childList" && record.target.classList.contains("options")) {
+            // This smells like a Gradio dropdown menu having changed,
+            // so let's enqueue an update for the input element that shows the current value.
+            let wrap = record.target.parentNode;
+            let input = wrap?.querySelector("input");
+            if (input) {
+                input.title = ""; // So we'll even have a chance to update it.
+                tooltipCheckNodes.add(input);
+            }
+        }
+        for (const node of record.addedNodes) {
+            if (node.nodeType === Node.ELEMENT_NODE && !node.classList.contains("hide")) {
+                if (!node.title) {
+                    if (
+                        node.tagName === "SPAN" ||
+                        node.tagName === "BUTTON" ||
+                        node.tagName === "P" ||
+                        node.tagName === "INPUT" ||
+                        (node.tagName === "LI" && node.classList.contains("item")) // Gradio dropdown item
+                    ) {
+                        tooltipCheckNodes.add(node);
+                    }
+                }
+                node.querySelectorAll('span, button, p').forEach(n => tooltipCheckNodes.add(n));
+            }
+        }
+    }
+    if (tooltipCheckNodes.size) {
+        clearTimeout(tooltipCheckTimer);
+        tooltipCheckTimer = setTimeout(processTooltipCheckNodes, 1000);
+    }
+});

javascript/hires_fix.js

@@ -1,18 +1,18 @@
-function onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y){
+function onCalcResolutionHires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y) {
-    function setInactive(elem, inactive){
+    function setInactive(elem, inactive) {
-        elem.classList.toggle('inactive', !!inactive)
+        elem.classList.toggle('inactive', !!inactive);
     }
-    var hrUpscaleBy = gradioApp().getElementById('txt2img_hr_scale')
+    var hrUpscaleBy = gradioApp().getElementById('txt2img_hr_scale');
-    var hrResizeX = gradioApp().getElementById('txt2img_hr_resize_x')
+    var hrResizeX = gradioApp().getElementById('txt2img_hr_resize_x');
-    var hrResizeY = gradioApp().getElementById('txt2img_hr_resize_y')
+    var hrResizeY = gradioApp().getElementById('txt2img_hr_resize_y');
-    gradioApp().getElementById('txt2img_hires_fix_row2').style.display = opts.use_old_hires_fix_width_height ? "none" : ""
+    gradioApp().getElementById('txt2img_hires_fix_row2').style.display = opts.use_old_hires_fix_width_height ? "none" : "";
-    setInactive(hrUpscaleBy, opts.use_old_hires_fix_width_height || hr_resize_x > 0 || hr_resize_y > 0)
+    setInactive(hrUpscaleBy, opts.use_old_hires_fix_width_height || hr_resize_x > 0 || hr_resize_y > 0);
-    setInactive(hrResizeX, opts.use_old_hires_fix_width_height || hr_resize_x == 0)
+    setInactive(hrResizeX, opts.use_old_hires_fix_width_height || hr_resize_x == 0);
-    setInactive(hrResizeY, opts.use_old_hires_fix_width_height || hr_resize_y == 0)
+    setInactive(hrResizeY, opts.use_old_hires_fix_width_height || hr_resize_y == 0);
-    return [enable, width, height, hr_scale, hr_resize_x, hr_resize_y]
+    return [enable, width, height, hr_scale, hr_resize_x, hr_resize_y];
 }

View File

@@ -4,17 +4,16 @@
 */
function imageMaskResize() {
    const canvases = gradioApp().querySelectorAll('#img2maskimg .touch-none canvas');
    if (!canvases.length) {
        window.removeEventListener('resize', imageMaskResize);
        return;
    }

    const wrapper = canvases[0].closest('.touch-none');
    const previewImage = wrapper.previousElementSibling;

    if (!previewImage.complete) {
        previewImage.addEventListener('load', imageMaskResize);
        return;
    }

@@ -24,15 +23,15 @@ function imageMaskResize() {
    const nh = previewImage.naturalHeight;
    const portrait = nh > nw;

    const wW = Math.min(w, portrait ? h / nh * nw : w / nw * nw);
    const wH = Math.min(h, portrait ? h / nh * nh : w / nw * nh);

    wrapper.style.width = `${wW}px`;
    wrapper.style.height = `${wH}px`;
    wrapper.style.left = `0px`;
    wrapper.style.top = `0px`;

    canvases.forEach(c => {
        c.style.width = c.style.height = '';
        c.style.maxWidth = '100%';
        c.style.maxHeight = '100%';
@@ -40,5 +39,5 @@ function imageMaskResize() {
    });
}

onAfterUiUpdate(imageMaskResize);
window.addEventListener('resize', imageMaskResize);
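The wW/wH computation is a standard aspect-fit: portrait images scale by h/nh, landscape ones by w/nw, clamped to the container, so the wrapper ends up exactly the size of the letterboxed preview and the mask canvases line up with it. A worked example of that arithmetic, with illustrative numbers (the landscape branch is simplified here, since w / nw * nw is just w):

// A 512x768 (portrait) image inside a 600x600 container.
const nw = 512, nh = 768, w = 600, h = 600;
const portrait = nh > nw;                            // true
const wW = Math.min(w, portrait ? h / nh * nw : w);  // min(600, 400) = 400
const wH = Math.min(h, portrait ? h : w / nw * nh);  // min(600, 600) = 600
// The wrapper becomes 400x600, matching the displayed image.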

View File

@@ -1,18 +0,0 @@
window.onload = (function(){
window.addEventListener('drop', e => {
const target = e.composedPath()[0];
if (target.placeholder.indexOf("Prompt") == -1) return;
let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
e.stopPropagation();
e.preventDefault();
const imgParent = gradioApp().getElementById(prompt_target);
const files = e.dataTransfer.files;
const fileInput = imgParent.querySelector('input[type="file"]');
if ( fileInput ) {
fileInput.files = files;
fileInput.dispatchEvent(new Event('change'));
}
});
});
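The deleted handler relied on a trick that remains useful elsewhere: an <input type="file"> can be filled programmatically by assigning a FileList and dispatching a synthetic change event so the framework notices the new file. A minimal sketch of just that mechanism (the helper name is hypothetical):

// Hand a file to a hidden file input as if the user had picked it.
// DataTransfer is used to build a FileList from scratch.
function assignFile(fileInput, file) {
    const dt = new DataTransfer();
    dt.items.add(file);
    fileInput.files = dt.files;
    fileInput.dispatchEvent(new Event('change'));
}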

View File

@@ -5,24 +5,24 @@ function closeModal() {
function showModal(event) {
    const source = event.target || event.srcElement;
    const modalImage = gradioApp().getElementById("modalImage");
    const lb = gradioApp().getElementById("lightboxModal");
    modalImage.src = source.src;
    if (modalImage.style.display === 'none') {
        lb.style.setProperty('background-image', 'url(' + source.src + ')');
    }
    lb.style.display = "flex";
    lb.focus();

    const tabTxt2Img = gradioApp().getElementById("tab_txt2img");
    const tabImg2Img = gradioApp().getElementById("tab_img2img");
    // show the save button in modal only on txt2img or img2img tabs
    if (tabTxt2Img.style.display != "none" || tabImg2Img.style.display != "none") {
        gradioApp().getElementById("modal_save").style.display = "inline";
    } else {
        gradioApp().getElementById("modal_save").style.display = "none";
    }
    event.stopPropagation();
}

@@ -30,14 +30,15 @@ function negmod(n, m) {
}

function updateOnBackgroundChange() {
    const modalImage = gradioApp().getElementById("modalImage");
    if (modalImage && modalImage.offsetParent) {
        let currentButton = selected_gallery_button();

        if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) {
            modalImage.src = currentButton.children[0].src;
            if (modalImage.style.display === 'none') {
                const modal = gradioApp().getElementById("lightboxModal");
                modal.style.setProperty('background-image', `url(${modalImage.src})`);
            }
        }
    }
}

@@ -49,68 +50,68 @@ function modalImageSwitch(offset) {
    if (galleryButtons.length > 1) {
        var currentButton = selected_gallery_button();

        var result = -1;
        galleryButtons.forEach(function(v, i) {
            if (v == currentButton) {
                result = i;
            }
        });

        if (result != -1) {
            var nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)];
            nextButton.click();
            const modalImage = gradioApp().getElementById("modalImage");
            const modal = gradioApp().getElementById("lightboxModal");
            modalImage.src = nextButton.children[0].src;
            if (modalImage.style.display === 'none') {
                modal.style.setProperty('background-image', `url(${modalImage.src})`);
            }
            setTimeout(function() {
                modal.focus();
            }, 10);
        }
    }
}

function saveImage() {
    const tabTxt2Img = gradioApp().getElementById("tab_txt2img");
    const tabImg2Img = gradioApp().getElementById("tab_img2img");
    const saveTxt2Img = "save_txt2img";
    const saveImg2Img = "save_img2img";

    if (tabTxt2Img.style.display != "none") {
        gradioApp().getElementById(saveTxt2Img).click();
    } else if (tabImg2Img.style.display != "none") {
        gradioApp().getElementById(saveImg2Img).click();
    } else {
        console.error("missing implementation for saving modal of this type");
    }
}

function modalSaveImage(event) {
    saveImage();
    event.stopPropagation();
}

function modalNextImage(event) {
    modalImageSwitch(1);
    event.stopPropagation();
}

function modalPrevImage(event) {
    modalImageSwitch(-1);
    event.stopPropagation();
}

function modalKeyHandler(event) {
    switch (event.key) {
    case "s":
        saveImage();
        break;
    case "ArrowLeft":
        modalPrevImage(event);
        break;
    case "ArrowRight":
        modalNextImage(event);
        break;
    case "Escape":
        closeModal();
@@ -119,38 +120,39 @@ function modalKeyHandler(event) {
    }
}

function setupImageForLightbox(e) {
    if (e.dataset.modded) {
        return;
    }

    e.dataset.modded = true;
    e.style.cursor = 'pointer';
    e.style.userSelect = 'none';

    var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1;

    // For Firefox, listening on click first switches to the next image, then shows the lightbox.
    // If you know how to fix this without switching to the mousedown event, please do.
    // For other browsers the event is click, to make it possible to drag the picture.
    var event = isFirefox ? 'mousedown' : 'click';

    e.addEventListener(event, function(evt) {
        if (!opts.js_modal_lightbox || evt.button != 0) return;

        modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed);
        evt.preventDefault();
        showModal(evt);
    }, true);
}

function modalZoomSet(modalImage, enable) {
    if (modalImage) modalImage.classList.toggle('modalImageFullscreen', !!enable);
}

function modalZoomToggle(event) {
    var modalImage = gradioApp().getElementById("modalImage");
    modalZoomSet(modalImage, !modalImage.classList.contains('modalImageFullscreen'));
    event.stopPropagation();
}

function modalTileImageToggle(event) {
@@ -159,93 +161,87 @@ function modalTileImageToggle(event) {
    const isTiling = modalImage.style.display === 'none';
    if (isTiling) {
        modalImage.style.display = 'block';
        modal.style.setProperty('background-image', 'none');
    } else {
        modalImage.style.display = 'none';
        modal.style.setProperty('background-image', `url(${modalImage.src})`);
    }

    event.stopPropagation();
}

onAfterUiUpdate(function() {
    var fullImg_preview = gradioApp().querySelectorAll('.gradio-gallery > div > img');
    if (fullImg_preview != null) {
        fullImg_preview.forEach(setupImageForLightbox);
    }
    updateOnBackgroundChange();
});

document.addEventListener("DOMContentLoaded", function() {
    //const modalFragment = document.createDocumentFragment();
    const modal = document.createElement('div');
    modal.onclick = closeModal;
    modal.id = "lightboxModal";
    modal.tabIndex = 0;
    modal.addEventListener('keydown', modalKeyHandler, true);

    const modalControls = document.createElement('div');
    modalControls.className = 'modalControls gradio-container';
    modal.append(modalControls);

    const modalZoom = document.createElement('span');
    modalZoom.className = 'modalZoom cursor';
    modalZoom.innerHTML = '&#10529;';
    modalZoom.addEventListener('click', modalZoomToggle, true);
    modalZoom.title = "Toggle zoomed view";
    modalControls.appendChild(modalZoom);

    const modalTileImage = document.createElement('span');
    modalTileImage.className = 'modalTileImage cursor';
    modalTileImage.innerHTML = '&#8862;';
    modalTileImage.addEventListener('click', modalTileImageToggle, true);
    modalTileImage.title = "Preview tiling";
    modalControls.appendChild(modalTileImage);

    const modalSave = document.createElement("span");
    modalSave.className = "modalSave cursor";
    modalSave.id = "modal_save";
    modalSave.innerHTML = "&#x1F5AB;";
    modalSave.addEventListener("click", modalSaveImage, true);
    modalSave.title = "Save Image(s)";
    modalControls.appendChild(modalSave);

    const modalClose = document.createElement('span');
    modalClose.className = 'modalClose cursor';
    modalClose.innerHTML = '&times;';
    modalClose.onclick = closeModal;
    modalClose.title = "Close image viewer";
    modalControls.appendChild(modalClose);

    const modalImage = document.createElement('img');
    modalImage.id = 'modalImage';
    modalImage.onclick = closeModal;
    modalImage.tabIndex = 0;
    modalImage.addEventListener('keydown', modalKeyHandler, true);
    modal.appendChild(modalImage);

    const modalPrev = document.createElement('a');
    modalPrev.className = 'modalPrev';
    modalPrev.innerHTML = '&#10094;';
    modalPrev.tabIndex = 0;
    modalPrev.addEventListener('click', modalPrevImage, true);
    modalPrev.addEventListener('keydown', modalKeyHandler, true);
    modal.appendChild(modalPrev);

    const modalNext = document.createElement('a');
    modalNext.className = 'modalNext';
    modalNext.innerHTML = '&#10095;';
    modalNext.tabIndex = 0;
    modalNext.addEventListener('click', modalNextImage, true);
    modalNext.addEventListener('keydown', modalKeyHandler, true);
    modal.appendChild(modalNext);

    try {
        gradioApp().appendChild(modal);

View File

@@ -1,7 +1,9 @@
let gamepads = [];

window.addEventListener('gamepadconnected', (e) => {
    const index = e.gamepad.index;
    let isWaiting = false;
    gamepads[index] = setInterval(async() => {
        if (!opts.js_modal_lightbox_gamepad || isWaiting) return;
        const gamepad = navigator.getGamepads()[index];
        const xValue = gamepad.axes[0];
@@ -14,7 +16,7 @@ window.addEventListener('gamepadconnected', (e) => {
        }
        if (isWaiting) {
            await sleepUntil(() => {
                const xValue = navigator.getGamepads()[index].axes[0];
                if (xValue < 0.3 && xValue > -0.3) {
                    return true;
                }
@@ -24,6 +26,10 @@ window.addEventListener('gamepadconnected', (e) => {
    }, 10);
});

window.addEventListener('gamepaddisconnected', (e) => {
    clearInterval(gamepads[e.gamepad.index]);
});
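The change above stores each device's interval handle under its gamepad index so that polling can be torn down when the device goes away, rather than leaking a timer per connect event. The same connect/poll/disconnect skeleton in isolation, with the poll body left as a stub:

// One polling loop per connected gamepad, keyed by index so that
// 'gamepaddisconnected' can cancel exactly the loop it owns.
const pollHandles = [];

window.addEventListener('gamepadconnected', (e) => {
    const index = e.gamepad.index;
    pollHandles[index] = setInterval(() => {
        const pad = navigator.getGamepads()[index];
        if (!pad) return;
        // ... read pad.axes / pad.buttons here ...
    }, 10);
});

window.addEventListener('gamepaddisconnected', (e) => {
    clearInterval(pollHandles[e.gamepad.index]);
});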
/*
Primarily for vr controller type pointer devices.
I use the wheel event because there's currently no way to do it properly with web xr.

View File

@@ -1,10 +1,9 @@
// localization = {} -- the dict with translations is created by the backend

var ignore_ids_for_localization = {
    setting_sd_hypernetwork: 'OPTION',
    setting_sd_model_checkpoint: 'OPTION',
    modelmerger_primary_model_name: 'OPTION',
    modelmerger_secondary_model_name: 'OPTION',
    modelmerger_tertiary_model_name: 'OPTION',
@@ -17,111 +16,111 @@ ignore_ids_for_localization={
    setting_realesrgan_enabled_models: 'SPAN',
    extras_upscaler_1: 'SPAN',
    extras_upscaler_2: 'SPAN',
};

var re_num = /^[.\d]+$/;
var re_emoji = /[\p{Extended_Pictographic}\u{1F3FB}-\u{1F3FF}\u{1F9B0}-\u{1F9B3}]/u;

var original_lines = {};
var translated_lines = {};

function hasLocalization() {
    return window.localization && Object.keys(window.localization).length > 0;
}

function textNodesUnder(el) {
    var n, a = [], walk = document.createTreeWalker(el, NodeFilter.SHOW_TEXT, null, false);
    while ((n = walk.nextNode())) a.push(n);
    return a;
}

function canBeTranslated(node, text) {
    if (!text) return false;
    if (!node.parentElement) return false;

    var parentType = node.parentElement.nodeName;
    if (parentType == 'SCRIPT' || parentType == 'STYLE' || parentType == 'TEXTAREA') return false;

    if (parentType == 'OPTION' || parentType == 'SPAN') {
        var pnode = node;
        for (var level = 0; level < 4; level++) {
            pnode = pnode.parentElement;
            if (!pnode) break;

            if (ignore_ids_for_localization[pnode.id] == parentType) return false;
        }
    }

    if (re_num.test(text)) return false;
    if (re_emoji.test(text)) return false;
    return true;
}

function getTranslation(text) {
    if (!text) return undefined;

    if (translated_lines[text] === undefined) {
        original_lines[text] = 1;
    }

    var tl = localization[text];
    if (tl !== undefined) {
        translated_lines[tl] = 1;
    }

    return tl;
}

function processTextNode(node) {
    var text = node.textContent.trim();

    if (!canBeTranslated(node, text)) return;

    var tl = getTranslation(text);
    if (tl !== undefined) {
        node.textContent = tl;
    }
}

function processNode(node) {
    if (node.nodeType == 3) {
        processTextNode(node);
        return;
    }

    if (node.title) {
        let tl = getTranslation(node.title);
        if (tl !== undefined) {
            node.title = tl;
        }
    }

    if (node.placeholder) {
        let tl = getTranslation(node.placeholder);
        if (tl !== undefined) {
            node.placeholder = tl;
        }
    }

    textNodesUnder(node).forEach(function(node) {
        processTextNode(node);
    });
}

function dumpTranslations() {
    if (!hasLocalization()) {
        // If we don't have any localization,
        // we will not have traversed the app to find
        // original_lines, so do that now.
        processNode(gradioApp());
    }
    var dumped = {};
    if (localization.rtl) {
        dumped.rtl = true;
    }

    for (const text in original_lines) {
        if (dumped[text] !== undefined) continue;
        dumped[text] = localization[text] || text;
    }
@@ -129,7 +128,7 @@ function dumpTranslations(){
}

function download_localization() {
    var text = JSON.stringify(dumpTranslations(), null, 4);

    var element = document.createElement('a');
    element.setAttribute('href', 'data:text/plain;charset=utf-8,' + encodeURIComponent(text));
@@ -142,20 +141,20 @@ function download_localization() {
    document.body.removeChild(element);
}

document.addEventListener("DOMContentLoaded", function() {
    if (!hasLocalization()) {
        return;
    }

    onUiUpdate(function(m) {
        m.forEach(function(mutation) {
            mutation.addedNodes.forEach(function(node) {
                processNode(node);
            });
        });
    });

    processNode(gradioApp());

    if (localization.rtl) { // if the language is from right to left,
        (new MutationObserver((mutations, observer) => { // wait for the style to load
@@ -170,8 +169,8 @@ document.addEventListener("DOMContentLoaded", function () {
                            }
                        }
                    }
                });
            });
        })).observe(gradioApp(), {childList: true});
    }
});
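For reference, the table this file consumes is a flat source-string to translated-string map; getTranslation also records every string it sees in original_lines, which is what lets dumpTranslations emit a template for untranslated UIs. A toy illustration of the lookup and its fallback (the dictionary contents are made up):

// Illustrative only: the runtime shape of `localization` and how a lookup
// falls back to the source string when there is no translation.
const localization = {rtl: false, "Generate": "Générer", "Interrupt": "Interrompre"};

console.log(localization["Generate"] || "Generate");  // "Générer"
console.log(localization["Sampler"] || "Sampler");    // "Sampler" (no entry, kept as-is)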

View File

@@ -4,14 +4,14 @@ let lastHeadImg = null;
let notificationButton = null;

onAfterUiUpdate(function() {
    if (notificationButton == null) {
        notificationButton = gradioApp().getElementById('request_notifications');

        if (notificationButton != null) {
            notificationButton.addEventListener('click', () => {
                void Notification.requestPermission();
            }, true);
        }
    }
@@ -42,7 +42,7 @@ onUiUpdate(function(){
        }
    );

    notification.onclick = function(_) {
        parent.focus();
        this.close();
    };

View File

@@ -0,0 +1,153 @@
function createRow(table, cellName, items) {
var tr = document.createElement('tr');
var res = [];
items.forEach(function(x, i) {
if (x === undefined) {
res.push(null);
return;
}
var td = document.createElement(cellName);
td.textContent = x;
tr.appendChild(td);
res.push(td);
var colspan = 1;
for (var n = i + 1; n < items.length; n++) {
if (items[n] !== undefined) {
break;
}
colspan += 1;
}
if (colspan > 1) {
td.colSpan = colspan;
}
});
table.appendChild(tr);
return res;
}
function showProfile(path, cutoff = 0.05) {
requestGet(path, {}, function(data) {
var table = document.createElement('table');
table.className = 'popup-table';
data.records['total'] = data.total;
var keys = Object.keys(data.records).sort(function(a, b) {
return data.records[b] - data.records[a];
});
var items = keys.map(function(x) {
return {key: x, parts: x.split('/'), time: data.records[x]};
});
var maxLength = items.reduce(function(a, b) {
return Math.max(a, b.parts.length);
}, 0);
var cols = createRow(table, 'th', ['record', 'seconds']);
cols[0].colSpan = maxLength;
function arraysEqual(a, b) {
return !(a < b || b < a);
}
var addLevel = function(level, parent, hide) {
var matching = items.filter(function(x) {
return x.parts[level] && !x.parts[level + 1] && arraysEqual(x.parts.slice(0, level), parent);
});
var sorted = matching.sort(function(a, b) {
return b.time - a.time;
});
var othersTime = 0;
var othersList = [];
var othersRows = [];
var childrenRows = [];
sorted.forEach(function(x) {
var visible = x.time >= cutoff && !hide;
var cells = [];
for (var i = 0; i < maxLength; i++) {
cells.push(x.parts[i]);
}
cells.push(x.time.toFixed(3));
var cols = createRow(table, 'td', cells);
for (i = 0; i < level; i++) {
cols[i].className = 'muted';
}
var tr = cols[0].parentNode;
if (!visible) {
tr.classList.add("hidden");
}
if (x.time >= cutoff) {
childrenRows.push(tr);
} else {
othersTime += x.time;
othersList.push(x.parts[level]);
othersRows.push(tr);
}
var children = addLevel(level + 1, parent.concat([x.parts[level]]), true);
if (children.length > 0) {
var cell = cols[level];
var onclick = function() {
cell.classList.remove("link");
cell.removeEventListener("click", onclick);
children.forEach(function(x) {
x.classList.remove("hidden");
});
};
cell.classList.add("link");
cell.addEventListener("click", onclick);
}
});
if (othersTime > 0) {
var cells = [];
for (var i = 0; i < maxLength; i++) {
cells.push(parent[i]);
}
cells.push(othersTime.toFixed(3));
cells[level] = 'others';
var cols = createRow(table, 'td', cells);
for (i = 0; i < level; i++) {
cols[i].className = 'muted';
}
var cell = cols[level];
var tr = cell.parentNode;
var onclick = function() {
tr.classList.add("hidden");
cell.classList.remove("link");
cell.removeEventListener("click", onclick);
othersRows.forEach(function(x) {
x.classList.remove("hidden");
});
};
cell.title = othersList.join(", ");
cell.classList.add("link");
cell.addEventListener("click", onclick);
if (hide) {
tr.classList.add("hidden");
}
childrenRows.push(tr);
}
return childrenRows;
};
addLevel(0, []);
popup(table);
});
}
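A call site passes the URL of a JSON endpoint whose payload matches the shape consumed above, {records: {"path/to/step": seconds, ...}, total: seconds}, plus an optional cutoff below which rows are folded into a collapsible "others" row. The endpoint name below is only an example, not something this file defines:

// Hypothetical endpoint; the handler expects {records: {...}, total: N}.
// Steps cheaper than 0.1s are hidden behind clickable "others" rows.
showProfile("./internal/profile-startup", 0.1);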

View File

@@ -1,29 +1,29 @@
// code related to showing and updating progressbar shown as the image is being made

function rememberGallerySelection() {

}

function getGallerySelectedIndex() {

}

function request(url, data, handler, errorHandler) {
    var xhr = new XMLHttpRequest();
    xhr.open("POST", url, true);
    xhr.setRequestHeader("Content-Type", "application/json");
    xhr.onreadystatechange = function() {
        if (xhr.readyState === 4) {
            if (xhr.status === 200) {
                try {
                    var js = JSON.parse(xhr.responseText);
                    handler(js);
                } catch (error) {
                    console.error(error);
                    errorHandler();
                }
            } else {
                errorHandler();
            }
        }
    };
@@ -31,147 +31,147 @@ function request(url, data, handler, errorHandler){
    xhr.send(js);
}

function pad2(x) {
    return x < 10 ? '0' + x : x;
}

function formatTime(secs) {
    if (secs > 3600) {
        return pad2(Math.floor(secs / 60 / 60)) + ":" + pad2(Math.floor(secs / 60) % 60) + ":" + pad2(Math.floor(secs) % 60);
    } else if (secs > 60) {
        return pad2(Math.floor(secs / 60)) + ":" + pad2(Math.floor(secs) % 60);
    } else {
        return Math.floor(secs) + "s";
    }
}

function setTitle(progress) {
    var title = 'Stable Diffusion';

    if (opts.show_progress_in_title && progress) {
        title = '[' + progress.trim() + '] ' + title;
    }

    if (document.title != title) {
        document.title = title;
    }
}

function randomId() {
    return "task(" + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7) + Math.random().toString(36).slice(2, 7) + ")";
}

// starts sending progress requests to "/internal/progress" uri, creating progressbar above progressbarContainer element and
// preview inside gallery element. Cleans up all created stuff when the task is over and calls atEnd.
// calls onProgress every time there is a progress update
function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgress, inactivityTimeout = 40) {
    var dateStart = new Date();
    var wasEverActive = false;
    var parentProgressbar = progressbarContainer.parentNode;
    var parentGallery = gallery ? gallery.parentNode : null;

    var divProgress = document.createElement('div');
    divProgress.className = 'progressDiv';
    divProgress.style.display = opts.show_progressbar ? "block" : "none";
    var divInner = document.createElement('div');
    divInner.className = 'progress';

    divProgress.appendChild(divInner);
    parentProgressbar.insertBefore(divProgress, progressbarContainer);

    if (parentGallery) {
        var livePreview = document.createElement('div');
        livePreview.className = 'livePreview';
        parentGallery.insertBefore(livePreview, gallery);
    }

    var removeProgressBar = function() {
        setTitle("");
        parentProgressbar.removeChild(divProgress);
        if (parentGallery) parentGallery.removeChild(livePreview);
        atEnd();
    };

    var fun = function(id_task, id_live_preview) {
        request("./internal/progress", {id_task: id_task, id_live_preview: id_live_preview}, function(res) {
            if (res.completed) {
                removeProgressBar();
                return;
            }

            var rect = progressbarContainer.getBoundingClientRect();

            if (rect.width) {
                divProgress.style.width = rect.width + "px";
            }

            let progressText = "";

            divInner.style.width = ((res.progress || 0) * 100.0) + '%';
            divInner.style.background = res.progress ? "" : "transparent";

            if (res.progress > 0) {
                progressText = ((res.progress || 0) * 100.0).toFixed(0) + '%';
            }

            if (res.eta) {
                progressText += " ETA: " + formatTime(res.eta);
            }

            setTitle(progressText);

            if (res.textinfo && res.textinfo.indexOf("\n") == -1) {
                progressText = res.textinfo + " " + progressText;
            }

            divInner.textContent = progressText;

            var elapsedFromStart = (new Date() - dateStart) / 1000;

            if (res.active) wasEverActive = true;

            if (!res.active && wasEverActive) {
                removeProgressBar();
                return;
            }

            if (elapsedFromStart > inactivityTimeout && !res.queued && !res.active) {
                removeProgressBar();
                return;
            }

            if (res.live_preview && gallery) {
                rect = gallery.getBoundingClientRect();
                if (rect.width) {
                    livePreview.style.width = rect.width + "px";
                    livePreview.style.height = rect.height + "px";
                }

                var img = new Image();
                img.onload = function() {
                    livePreview.appendChild(img);
                    if (livePreview.childElementCount > 2) {
                        livePreview.removeChild(livePreview.firstElementChild);
                    }
                };
                img.src = res.live_preview;
            }

            if (onProgress) {
                onProgress(res);
            }

            setTimeout(() => {
                fun(id_task, res.id_live_preview);
            }, opts.live_preview_refresh_period || 500);
        }, function() {
            removeProgressBar();
        });
    };

    fun(id_task, 0);
}
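The polling contract implied above: POST ./internal/progress with {id_task, id_live_preview}, and the response drives the bar, the window title, and the live preview. A sketch of the fields the callback reads (the field list is taken from the code; the values are illustrative):

// Example response consumed by the polling callback above.
var exampleProgressResponse = {
    active: true,          // task is currently running
    queued: false,         // task is waiting in the queue
    completed: false,      // task finished; triggers cleanup
    progress: 0.42,        // 0..1, drives the bar width and title
    eta: 73,               // seconds remaining, formatted via formatTime()
    textinfo: "Sampling",  // single-line status prefixed to the percentage
    live_preview: "data:image/png;base64,...", // preview image, if any
    id_live_preview: 7     // echoed back on the next request
};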

View File

@@ -1,17 +1,17 @@
function start_training_textual_inversion() {
    gradioApp().querySelector('#ti_error').innerHTML = '';

    var id = randomId();
    requestProgress(id, gradioApp().getElementById('ti_output'), gradioApp().getElementById('ti_gallery'), function() {}, function(progress) {
        gradioApp().getElementById('ti_progress').innerHTML = progress.textinfo;
    });

    var res = Array.from(arguments);

    res[0] = id;

    return res;
}

View File

@@ -0,0 +1,83 @@
let promptTokenCountDebounceTime = 800;
let promptTokenCountTimeouts = {};
var promptTokenCountUpdateFunctions = {};
function update_txt2img_tokens(...args) {
// Called from Gradio
update_token_counter("txt2img_token_button");
if (args.length == 2) {
return args[0];
}
return args;
}
function update_img2img_tokens(...args) {
// Called from Gradio
update_token_counter("img2img_token_button");
if (args.length == 2) {
return args[0];
}
return args;
}
function update_token_counter(button_id) {
if (opts.disable_token_counters) {
return;
}
if (promptTokenCountTimeouts[button_id]) {
clearTimeout(promptTokenCountTimeouts[button_id]);
}
promptTokenCountTimeouts[button_id] = setTimeout(
() => gradioApp().getElementById(button_id)?.click(),
promptTokenCountDebounceTime,
);
}
function recalculatePromptTokens(name) {
promptTokenCountUpdateFunctions[name]?.();
}
function recalculate_prompts_txt2img() {
// Called from Gradio
recalculatePromptTokens('txt2img_prompt');
recalculatePromptTokens('txt2img_neg_prompt');
return Array.from(arguments);
}
function recalculate_prompts_img2img() {
// Called from Gradio
recalculatePromptTokens('img2img_prompt');
recalculatePromptTokens('img2img_neg_prompt');
return Array.from(arguments);
}
function setupTokenCounting(id, id_counter, id_button) {
var prompt = gradioApp().getElementById(id);
var counter = gradioApp().getElementById(id_counter);
var textarea = gradioApp().querySelector(`#${id} > label > textarea`);
if (opts.disable_token_counters) {
counter.style.display = "none";
return;
}
if (counter.parentElement == prompt.parentElement) {
return;
}
prompt.parentElement.insertBefore(counter, prompt);
prompt.parentElement.style.position = "relative";
promptTokenCountUpdateFunctions[id] = function() {
update_token_counter(id_button);
};
textarea.addEventListener("input", promptTokenCountUpdateFunctions[id]);
}
function setupTokenCounters() {
setupTokenCounting('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button');
setupTokenCounting('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button');
setupTokenCounting('img2img_prompt', 'img2img_token_counter', 'img2img_token_button');
setupTokenCounting('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button');
}
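Each prompt box thus gets a debounced updater: typing resets an 800 ms timer, and only when it fires does the hidden Gradio button get clicked to recount tokens server-side. Wiring up an additional prompt box would follow the same call shape; the IDs below are hypothetical and would have to match real Gradio elements:

// Hypothetical extra prompt field. The counter element is re-parented
// next to the prompt, and every input event schedules a (debounced)
// click on the hidden token-count button.
setupTokenCounting('myext_prompt', 'myext_token_counter', 'myext_token_button');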

View File

@@ -1,7 +1,7 @@
// various functions for interaction with ui.py not large enough to warrant putting them in separate files

function set_theme(theme) {
    var gradioURL = window.location.href;
    if (!gradioURL.includes('?__theme=')) {
        window.location.replace(gradioURL + '?__theme=' + theme);
    }
@@ -14,7 +14,7 @@ function all_gallery_buttons() {
        if (elem.parentElement.offsetParent) {
            visibleGalleryButtons.push(elem);
        }
    });
    return visibleGalleryButtons;
}

@@ -25,31 +25,35 @@ function selected_gallery_button() {
        if (elem.parentElement.offsetParent) {
            visibleCurrentButton = elem;
        }
    });
    return visibleCurrentButton;
}

function selected_gallery_index() {
    var buttons = all_gallery_buttons();
    var button = selected_gallery_button();

    var result = -1;
    buttons.forEach(function(v, i) {
        if (v == button) {
            result = i;
        }
    });

    return result;
}

function extract_image_from_gallery(gallery) {
    if (gallery.length == 0) {
        return [null];
    }
    if (gallery.length == 1) {
        return [gallery[0]];
    }

    var index = selected_gallery_index();

    if (index < 0 || index >= gallery.length) {
        // Use the first image in the gallery as the default
        index = 0;
    }
@@ -57,249 +61,205 @@ function extract_image_from_gallery(gallery){
    return [gallery[index]];
}

window.args_to_array = Array.from; // Compatibility with e.g. extensions that may expect this to be around
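The one-line shim works because Array.from does exactly what the removed helper did: copy any array-like, including an arguments object, into a real array. Both spellings behave identically:

// Equivalent behaviour, before and after the change:
function demo() {
    var viaHelper = window.args_to_array(arguments); // old extension-facing name
    var direct = Array.from(arguments);              // what it now aliases
    return [viaHelper, direct];                      // both are real arrays ["a", "b"]
}
demo("a", "b");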
function switch_to_txt2img() {
    gradioApp().querySelector('#tabs').querySelectorAll('button')[0].click();

    return Array.from(arguments);
}

function switch_to_img2img_tab(no) {
    gradioApp().querySelector('#tabs').querySelectorAll('button')[1].click();
    gradioApp().getElementById('mode_img2img').querySelectorAll('button')[no].click();
}
function switch_to_img2img() {
    switch_to_img2img_tab(0);
    return Array.from(arguments);
}

function switch_to_sketch() {
    switch_to_img2img_tab(1);
    return Array.from(arguments);
}

function switch_to_inpaint() {
    switch_to_img2img_tab(2);
    return Array.from(arguments);
}

function switch_to_inpaint_sketch() {
    switch_to_img2img_tab(3);
    return Array.from(arguments);
}

function switch_to_extras() {
    gradioApp().querySelector('#tabs').querySelectorAll('button')[2].click();

    return Array.from(arguments);
}

function get_tab_index(tabId) {
    let buttons = gradioApp().getElementById(tabId).querySelector('div').querySelectorAll('button');
    for (let i = 0; i < buttons.length; i++) {
        if (buttons[i].classList.contains('selected')) {
            return i;
        }
    }
    return 0;
}

function create_tab_index_args(tabId, args) {
    var res = Array.from(args);
    res[0] = get_tab_index(tabId);
    return res;
}

function get_img2img_tab_index() {
    let res = Array.from(arguments);
    res.splice(-2);
    res[0] = get_tab_index('mode_img2img');
    return res;
}

function create_submit_args(args) {
    var res = Array.from(args);

    // As it is currently, txt2img and img2img send back the previous output args (txt2img_gallery, generation_info, html_info) whenever you generate a new image.
    // This can lead to uploading a huge gallery of previously generated images, which leads to an unnecessary delay between submitting and beginning to generate.
    // I don't know why gradio is sending outputs along with inputs, but we can prevent sending the image gallery here, which seems to be an issue for some.
    // If gradio at some point stops sending outputs, this may break something
    if (Array.isArray(res[res.length - 3])) {
        res[res.length - 3] = null;
    }

    return res;
}
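Concretely, the previous output gallery travels third from the end of the argument list, so nulling res[res.length - 3] drops just that payload before submission. A toy illustration of the effect (the argument layout here is an assumption for the example, matching the comment above):

// The old gallery array is replaced by null so it is not re-uploaded
// on every Generate click.
var args = ["task(abc)", "a prompt", ["huge.png", "gallery.png"], "info", "html"];
if (Array.isArray(args[args.length - 3])) {
    args[args.length - 3] = null;
}
// args is now ["task(abc)", "a prompt", null, "info", "html"]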
function showSubmitButtons(tabname, show) {
    gradioApp().getElementById(tabname + '_interrupt').style.display = show ? "none" : "block";
    gradioApp().getElementById(tabname + '_skip').style.display = show ? "none" : "block";
}

function showRestoreProgressButton(tabname, show) {
    var button = gradioApp().getElementById(tabname + "_restore_progress");
    if (!button) return;

    button.style.display = show ? "flex" : "none";
}

function submit() {
    showSubmitButtons('txt2img', false);

    var id = randomId();
    localStorage.setItem("txt2img_task_id", id);

    requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
        showSubmitButtons('txt2img', true);
        localStorage.removeItem("txt2img_task_id");
        showRestoreProgressButton('txt2img', false);
    });

    var res = create_submit_args(arguments);

    res[0] = id;

    return res;
}

function submit_img2img() {
    showSubmitButtons('img2img', false);

    var id = randomId();
    localStorage.setItem("img2img_task_id", id);

    requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
        showSubmitButtons('img2img', true);
        localStorage.removeItem("img2img_task_id");
        showRestoreProgressButton('img2img', false);
    });

    var res = create_submit_args(arguments);

    res[0] = id;
    res[1] = get_tab_index('mode_img2img');

    return res;
}

function restoreProgressTxt2img() {
    showRestoreProgressButton("txt2img", false);
    var id = localStorage.getItem("txt2img_task_id");

    if (id) {
        requestProgress(id, gradioApp().getElementById('txt2img_gallery_container'), gradioApp().getElementById('txt2img_gallery'), function() {
            showSubmitButtons('txt2img', true);
        }, null, 0);
    }

    return id;
}

function restoreProgressImg2img() {
    showRestoreProgressButton("img2img", false);

    var id = localStorage.getItem("img2img_task_id");

    if (id) {
        requestProgress(id, gradioApp().getElementById('img2img_gallery_container'), gradioApp().getElementById('img2img_gallery'), function() {
            showSubmitButtons('img2img', true);
        }, null, 0);
    }

    return id;
}

onUiLoaded(function() {
    showRestoreProgressButton('txt2img', localStorage.getItem("txt2img_task_id"));
    showRestoreProgressButton('img2img', localStorage.getItem("img2img_task_id"));
});

function modelmerger() {
    var id = randomId();
    requestProgress(id, gradioApp().getElementById('modelmerger_results_panel'), null, function() {});

    var res = create_submit_args(arguments);
    res[0] = id;
    return res;
}

function ask_for_style_name(_, prompt_text, negative_prompt_text) {
    var name_ = prompt('Style name:');
    return [name_, prompt_text, negative_prompt_text];
}

function confirm_clear_prompt(prompt, negative_prompt) {
    if (confirm("Delete prompt?")) {
        prompt = "";
        negative_prompt = "";
    }

    return [prompt, negative_prompt];
}

var opts = {};
onAfterUiUpdate(function() {
    if (Object.keys(opts).length != 0) return;

    var json_elem = gradioApp().getElementById('settings_json');
    if (json_elem == null) return;

    var textarea = json_elem.querySelector('textarea');
    var jsdata = textarea.value;
    opts = JSON.parse(jsdata);

    executeCallbacks(optionsChangedCallbacks); /*global optionsChangedCallbacks*/

    Object.defineProperty(textarea, 'value', {
        set: function(newValue) {
@@ -308,7 +268,7 @@ onUiUpdate(function(){
            valueProp.set.call(textarea, newValue);

            if (oldValue != newValue) {
                opts = JSON.parse(textarea.value);
            }

            executeCallbacks(optionsChangedCallbacks);
@@ -319,123 +279,109 @@ onUiUpdate(function(){
        }
    });

    json_elem.parentElement.style.display = "none";

    setupTokenCounters();

    var show_all_pages = gradioApp().getElementById('settings_show_all_pages');
    var settings_tabs = gradioApp().querySelector('#settings div');
    if (show_all_pages && settings_tabs) {
        settings_tabs.appendChild(show_all_pages);
        show_all_pages.onclick = function() {
            gradioApp().querySelectorAll('#settings > div').forEach(function(elem) {
                if (elem.id == "settings_tab_licenses") {
                    return;
                }

                elem.style.display = "block";
            });
        };
    }
});

onOptionsChanged(function() {
    var elem = gradioApp().getElementById('sd_checkpoint_hash');
    var sd_checkpoint_hash = opts.sd_checkpoint_hash || "";
    var shorthash = sd_checkpoint_hash.substring(0, 10);

    if (elem && elem.textContent != shorthash) {
        elem.textContent = shorthash;
        elem.title = sd_checkpoint_hash;
        elem.href = "https://google.com/search?q=" + sd_checkpoint_hash;
    }
});

let txt2img_textarea, img2img_textarea = undefined;

function restart_reload() {
    document.body.innerHTML = '<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>';

    var requestPing = function() {
        requestGet("./internal/ping", {}, function(data) {
            location.reload();
        }, function() {
            setTimeout(requestPing, 500);
        });
    };

    setTimeout(requestPing, 2000);

    return [];
}

// Simulate an `input` DOM event for Gradio Textbox component. Needed after you edit its contents in javascript, otherwise your edits
// will only be visible on the web page and not sent to python.
function updateInput(target) {
    let e = new Event("input", {bubbles: true});
    Object.defineProperty(e, "target", {value: target});
    target.dispatchEvent(e);
}
var desiredCheckpointName = null; var desiredCheckpointName = null;
function selectCheckpoint(name){ function selectCheckpoint(name) {
desiredCheckpointName = name; desiredCheckpointName = name;
gradioApp().getElementById('change_checkpoint').click() gradioApp().getElementById('change_checkpoint').click();
} }
function currentImg2imgSourceResolution(_, _, scaleBy){ function currentImg2imgSourceResolution(w, h, scaleBy) {
var img = gradioApp().querySelector('#mode_img2img > div[style="display: block;"] img') var img = gradioApp().querySelector('#mode_img2img > div[style="display: block;"] img');
return img ? [img.naturalWidth, img.naturalHeight, scaleBy] : [0, 0, scaleBy] return img ? [img.naturalWidth, img.naturalHeight, scaleBy] : [0, 0, scaleBy];
} }
function updateImg2imgResizeToTextAfterChangingImage(){ function updateImg2imgResizeToTextAfterChangingImage() {
// At the time this is called from gradio, the image has no yet been replaced. // At the time this is called from gradio, the image has no yet been replaced.
// There may be a better solution, but this is simple and straightforward so I'm going with it. // There may be a better solution, but this is simple and straightforward so I'm going with it.
setTimeout(function() { setTimeout(function() {
gradioApp().getElementById('img2img_update_resize_to').click() gradioApp().getElementById('img2img_update_resize_to').click();
}, 500); }, 500);
return [] return [];
}
function setRandomSeed(elem_id) {
var input = gradioApp().querySelector("#" + elem_id + " input");
if (!input) return [];
input.value = "-1";
updateInput(input);
return [];
}
function switchWidthHeight(tabname) {
var width = gradioApp().querySelector("#" + tabname + "_width input[type=number]");
var height = gradioApp().querySelector("#" + tabname + "_height input[type=number]");
if (!width || !height) return [];
var tmp = width.value;
width.value = height.value;
height.value = tmp;
updateInput(width);
updateInput(height);
return [];
} }
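
Note: the reworked restart_reload() above swaps the page for a placeholder and then polls the ./internal/ping endpoint until the server answers again. The same handshake can be scripted outside the browser; a minimal sketch, assuming a local instance on the default port (host, port, and timeout values are illustrative):

import time

import requests


def wait_for_webui(base_url="http://127.0.0.1:7860", interval=0.5, timeout=120):
    # Poll ./internal/ping until the server responds, mirroring restart_reload().
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            requests.get(f"{base_url}/internal/ping", timeout=5)
            return True
        except requests.RequestException:
            time.sleep(interval)
    return False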

javascript/ui_settings_hints.js
@@ -1,41 +1,62 @@
 // various hints and extra info for the settings tab

-onUiLoaded(function(){
-    createLink = function(elem_id, text, href){
-        var a = document.createElement('A')
-        a.textContent = text
-        a.target = '_blank';
-
-        elem = gradioApp().querySelector('#'+elem_id)
-        elem.insertBefore(a, elem.querySelector('label'))
-
-        return a
-    }
-
-    createLink("setting_samples_filename_pattern", "[wiki] ").href = "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"
-    createLink("setting_directories_filename_pattern", "[wiki] ").href = "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"
-
-    createLink("setting_quicksettings_list", "[info] ").addEventListener("click", function(event){
-        requestGet("./internal/quicksettings-hint", {}, function(data){
-            var table = document.createElement('table')
-            table.className = 'settings-value-table'
-
-            data.forEach(function(obj){
-                var tr = document.createElement('tr')
-                var td = document.createElement('td')
-                td.textContent = obj.name
-                tr.appendChild(td)
-
-                var td = document.createElement('td')
-                td.textContent = obj.label
-                tr.appendChild(td)
-
-                table.appendChild(tr)
-            })
-
-            popup(table);
-        })
-    });
-})
+var settingsHintsSetup = false;
+
+onOptionsChanged(function() {
+    if (settingsHintsSetup) return;
+    settingsHintsSetup = true;
+
+    gradioApp().querySelectorAll('#settings [id^=setting_]').forEach(function(div) {
+        var name = div.id.substr(8);
+        var commentBefore = opts._comments_before[name];
+        var commentAfter = opts._comments_after[name];
+
+        if (!commentBefore && !commentAfter) return;
+
+        var span = null;
+        if (div.classList.contains('gradio-checkbox')) span = div.querySelector('label span');
+        else if (div.classList.contains('gradio-checkboxgroup')) span = div.querySelector('span').firstChild;
+        else if (div.classList.contains('gradio-radio')) span = div.querySelector('span').firstChild;
+        else span = div.querySelector('label span').firstChild;
+
+        if (!span) return;
+
+        if (commentBefore) {
+            var comment = document.createElement('DIV');
+            comment.className = 'settings-comment';
+            comment.innerHTML = commentBefore;
+            span.parentElement.insertBefore(document.createTextNode('\xa0'), span);
+            span.parentElement.insertBefore(comment, span);
+            span.parentElement.insertBefore(document.createTextNode('\xa0'), span);
+        }
+        if (commentAfter) {
+            comment = document.createElement('DIV');
+            comment.className = 'settings-comment';
+            comment.innerHTML = commentAfter;
+            span.parentElement.insertBefore(comment, span.nextSibling);
+            span.parentElement.insertBefore(document.createTextNode('\xa0'), span.nextSibling);
+        }
+    });
+});
+
+function settingsHintsShowQuicksettings() {
+    requestGet("./internal/quicksettings-hint", {}, function(data) {
+        var table = document.createElement('table');
+        table.className = 'popup-table';
+
+        data.forEach(function(obj) {
+            var tr = document.createElement('tr');
+            var td = document.createElement('td');
+            td.textContent = obj.name;
+            tr.appendChild(td);
+
+            td = document.createElement('td');
+            td.textContent = obj.label;
+            tr.appendChild(td);
+
+            table.appendChild(tr);
+        });
+
+        popup(table);
+    });
+}
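
Note: settingsHintsShowQuicksettings() above renders rows from the ./internal/quicksettings-hint endpoint; judging by the table-building code, the payload is a list of objects with name and label fields, and it can be inspected directly. A minimal sketch, assuming a local instance:

import requests

hints = requests.get("http://127.0.0.1:7860/internal/quicksettings-hint", timeout=10).json()
for item in hints:
    print(f"{item['name']}: {item['label']}")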

384
launch.py

@@ -1,370 +1,38 @@
-# this scripts installs necessary requirements and launches main program in webui.py
-import subprocess
-import os
-import sys
-import importlib.util
-import shlex
-import platform
-import json
-
-from modules import cmd_args
-from modules.paths_internal import script_path, extensions_dir
-
-commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
-sys.argv += shlex.split(commandline_args)
-
-args, _ = cmd_args.parser.parse_known_args()
-
-python = sys.executable
-git = os.environ.get('GIT', "git")
-index_url = os.environ.get('INDEX_URL', "")
-stored_commit_hash = None
-stored_git_tag = None
-dir_repos = "repositories"
-
-if 'GRADIO_ANALYTICS_ENABLED' not in os.environ:
-    os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
-
-
-def check_python_version():
-    is_windows = platform.system() == "Windows"
-    major = sys.version_info.major
-    minor = sys.version_info.minor
-    micro = sys.version_info.micro
-
-    if is_windows:
-        supported_minors = [10]
-    else:
-        supported_minors = [7, 8, 9, 10, 11]
-
-    if not (major == 3 and minor in supported_minors):
-        import modules.errors
-
-        modules.errors.print_error_explanation(f"""
-INCOMPATIBLE PYTHON VERSION
-
-This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}.
-If you encounter an error with "RuntimeError: Couldn't install torch." message,
-or any other error regarding unsuccessful package (library) installation,
-please downgrade (or upgrade) to the latest version of 3.10 Python
-and delete current Python and "venv" folder in WebUI's directory.
-
-You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/
-
-{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
-
-Use --skip-python-version-check to suppress this warning.
-""")
-
-
-def commit_hash():
-    global stored_commit_hash
-
-    if stored_commit_hash is not None:
-        return stored_commit_hash
-
-    try:
-        stored_commit_hash = run(f"{git} rev-parse HEAD").strip()
-    except Exception:
-        stored_commit_hash = "<none>"
-
-    return stored_commit_hash
-
-
-def git_tag():
-    global stored_git_tag
-
-    if stored_git_tag is not None:
-        return stored_git_tag
-
-    try:
-        stored_git_tag = run(f"{git} describe --tags").strip()
-    except Exception:
-        stored_git_tag = "<none>"
-
-    return stored_git_tag
-
-
-def run(command, desc=None, errdesc=None, custom_env=None, live=False):
-    if desc is not None:
-        print(desc)
-
-    if live:
-        result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env)
-        if result.returncode != 0:
-            raise RuntimeError(f"""{errdesc or 'Error running command'}.
-Command: {command}
-Error code: {result.returncode}""")
-
-        return ""
-
-    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
-
-    if result.returncode != 0:
-        message = f"""{errdesc or 'Error running command'}.
-Command: {command}
-Error code: {result.returncode}
-stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
-stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
-"""
-        raise RuntimeError(message)
-
-    return result.stdout.decode(encoding="utf8", errors="ignore")
-
-
-def check_run(command):
-    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
-    return result.returncode == 0
-
-
-def is_installed(package):
-    try:
-        spec = importlib.util.find_spec(package)
-    except ModuleNotFoundError:
-        return False
-
-    return spec is not None
-
-
-def repo_dir(name):
-    return os.path.join(script_path, dir_repos, name)
-
-
-def run_python(code, desc=None, errdesc=None):
-    return run(f'"{python}" -c "{code}"', desc, errdesc)
-
-
-def run_pip(command, desc=None, live=False):
-    if args.skip_install:
-        return
-
-    index_url_line = f' --index-url {index_url}' if index_url != '' else ''
-    return run(f'"{python}" -m pip {command} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}", live=live)
-
-
-def check_run_python(code):
-    return check_run(f'"{python}" -c "{code}"')
-
-
-def git_clone(url, dir, name, commithash=None):
-    # TODO clone into temporary dir and move if successful
-
-    if os.path.exists(dir):
-        if commithash is None:
-            return
-
-        current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
-        if current_hash == commithash:
-            return
-
-        run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
-        run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
-        return
-
-    run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
-
-    if commithash is not None:
-        run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
-
-
-def git_pull_recursive(dir):
-    for subdir, _, _ in os.walk(dir):
-        if os.path.exists(os.path.join(subdir, '.git')):
-            try:
-                output = subprocess.check_output([git, '-C', subdir, 'pull', '--autostash'])
-                print(f"Pulled changes for repository in '{subdir}':\n{output.decode('utf-8').strip()}\n")
-            except subprocess.CalledProcessError as e:
-                print(f"Couldn't perform 'git pull' on repository in '{subdir}':\n{e.output.decode('utf-8').strip()}\n")
-
-
-def version_check(commit):
-    try:
-        import requests
-        commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
-        if commit != "<none>" and commits['commit']['sha'] != commit:
-            print("--------------------------------------------------------")
-            print("| You are not up to date with the most recent release. |")
-            print("| Consider running `git pull` to update.               |")
-            print("--------------------------------------------------------")
-        elif commits['commit']['sha'] == commit:
-            print("You are up to date with the most recent release.")
-        else:
-            print("Not a git clone, can't perform version check.")
-    except Exception as e:
-        print("version check failed", e)
-
-
-def run_extension_installer(extension_dir):
-    path_installer = os.path.join(extension_dir, "install.py")
-    if not os.path.isfile(path_installer):
-        return
-
-    try:
-        env = os.environ.copy()
-        env['PYTHONPATH'] = os.path.abspath(".")
-
-        print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
-    except Exception as e:
-        print(e, file=sys.stderr)
-
-
-def list_extensions(settings_file):
-    settings = {}
-
-    try:
-        if os.path.isfile(settings_file):
-            with open(settings_file, "r", encoding="utf8") as file:
-                settings = json.load(file)
-    except Exception as e:
-        print(e, file=sys.stderr)
-
-    disabled_extensions = set(settings.get('disabled_extensions', []))
-    disable_all_extensions = settings.get('disable_all_extensions', 'none')
-
-    if disable_all_extensions != 'none':
-        return []
-
-    return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]
-
-
-def run_extensions_installers(settings_file):
-    if not os.path.isdir(extensions_dir):
-        return
-
-    for dirname_extension in list_extensions(settings_file):
-        run_extension_installer(os.path.join(extensions_dir, dirname_extension))
-
-
-def prepare_environment():
-    torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url https://download.pytorch.org/whl/cu118")
-    requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
-
-    xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17')
-    gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
-    clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
-    openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b")
-
-    stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
-    taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
-    k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
-    codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
-    blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
-
-    stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
-    taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
-    k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec")
-    codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
-    blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
-
-    if not args.skip_python_version_check:
-        check_python_version()
-
-    commit = commit_hash()
-    tag = git_tag()
-
-    print(f"Python {sys.version}")
-    print(f"Version: {tag}")
-    print(f"Commit hash: {commit}")
-
-    if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
-        run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
-
-    if not args.skip_torch_cuda_test:
-        run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
-
-    if not is_installed("gfpgan"):
-        run_pip(f"install {gfpgan_package}", "gfpgan")
-
-    if not is_installed("clip"):
-        run_pip(f"install {clip_package}", "clip")
-
-    if not is_installed("open_clip"):
-        run_pip(f"install {openclip_package}", "open_clip")
-
-    if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers:
-        if platform.system() == "Windows":
-            if platform.python_version().startswith("3.10"):
-                run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
-            else:
-                print("Installation of xformers is not supported in this version of Python.")
-                print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
-                if not is_installed("xformers"):
-                    exit(0)
-        elif platform.system() == "Linux":
-            run_pip(f"install {xformers_package}", "xformers")
-
-    if not is_installed("pyngrok") and args.ngrok:
-        run_pip("install pyngrok", "ngrok")
-
-    os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)
-
-    git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
-    git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
-    git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
-    git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
-    git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
-
-    if not is_installed("lpips"):
-        run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer")
-
-    if not os.path.isfile(requirements_file):
-        requirements_file = os.path.join(script_path, requirements_file)
-    run_pip(f"install -r \"{requirements_file}\"", "requirements")
-
-    run_extensions_installers(settings_file=args.ui_settings_file)
-
-    if args.update_check:
-        version_check(commit)
-
-    if args.update_all_extensions:
-        git_pull_recursive(extensions_dir)
-
-    if "--exit" in sys.argv:
-        print("Exiting because of --exit argument")
-        exit(0)
-
-    if args.tests and not args.no_tests:
-        exitcode = tests(args.tests)
-        exit(exitcode)
-
-
-def tests(test_dir):
-    if "--api" not in sys.argv:
-        sys.argv.append("--api")
-    if "--ckpt" not in sys.argv:
-        sys.argv.append("--ckpt")
-        sys.argv.append(os.path.join(script_path, "test/test_files/empty.pt"))
-    if "--skip-torch-cuda-test" not in sys.argv:
-        sys.argv.append("--skip-torch-cuda-test")
-    if "--disable-nan-check" not in sys.argv:
-        sys.argv.append("--disable-nan-check")
-    if "--no-tests" not in sys.argv:
-        sys.argv.append("--no-tests")
-
-    print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}")
-
-    os.environ['COMMANDLINE_ARGS'] = ""
-    with open(os.path.join(script_path, 'test/stdout.txt'), "w", encoding="utf8") as stdout, open(os.path.join(script_path, 'test/stderr.txt'), "w", encoding="utf8") as stderr:
-        proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr)
-
-    import test.server_poll
-    exitcode = test.server_poll.run_tests(proc, test_dir)
-
-    print(f"Stopping Web UI process with id {proc.pid}")
-    proc.kill()
-    return exitcode
-
-
-def start():
-    print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
-    import webui
-    if '--nowebui' in sys.argv:
-        webui.api_only()
-    else:
-        webui.webui()
+from modules import launch_utils
+
+
+args = launch_utils.args
+python = launch_utils.python
+git = launch_utils.git
+index_url = launch_utils.index_url
+dir_repos = launch_utils.dir_repos
+
+commit_hash = launch_utils.commit_hash
+git_tag = launch_utils.git_tag
+
+run = launch_utils.run
+is_installed = launch_utils.is_installed
+repo_dir = launch_utils.repo_dir
+
+run_pip = launch_utils.run_pip
+check_run_python = launch_utils.check_run_python
+git_clone = launch_utils.git_clone
+git_pull_recursive = launch_utils.git_pull_recursive
+run_extension_installer = launch_utils.run_extension_installer
+prepare_environment = launch_utils.prepare_environment
+configure_for_tests = launch_utils.configure_for_tests
+start = launch_utils.start
+
+
+def main():
+    if not args.skip_prepare_environment:
+        prepare_environment()
+
+    if args.test_server:
+        configure_for_tests()
+
+    start()


 if __name__ == "__main__":
-    prepare_environment()
-    start()
+    main()
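
Note: the slimmed-down launch.py keeps re-exporting run, run_pip, is_installed and the other helpers because extension install.py scripts import them through the launch module. A minimal sketch of such an installer; the extension and package names are hypothetical:

# install.py of a hypothetical extension, executed by run_extension_installer() at startup.
import launch

if not launch.is_installed("lpips"):
    launch.run_pip("install lpips", "lpips for my-example-extension")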

BIN
modules/Roboto-Regular.ttf Normal file

Binary file not shown.

modules/api/api.py

@@ -14,32 +14,31 @@ from fastapi.encoders import jsonable_encoder
 from secrets import compare_digest

 import modules.shared as shared
-from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
-from modules.api.models import *
+from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart
+from modules.api import models
+from modules.shared import opts
 from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
 from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
 from modules.textual_inversion.preprocess import preprocess
 from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
 from PIL import PngImagePlugin,Image
-from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights
+from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights, checkpoint_alisases
+from modules.sd_vae import vae_dict
 from modules.sd_models_config import find_checkpoint_config_near_filename
 from modules.realesrgan_model import get_realesrgan_models
 from modules import devices
-from typing import List
+from typing import Dict, List, Any
 import piexif
 import piexif.helper
+from contextlib import closing

-def upscaler_to_index(name: str):
-    try:
-        return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
-    except:
-        raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}")
-

 def script_name_to_index(name, scripts):
     try:
         return [script.title().lower() for script in scripts].index(name.lower())
-    except:
-        raise HTTPException(status_code=422, detail=f"Script '{name}' not found")
+    except Exception as e:
+        raise HTTPException(status_code=422, detail=f"Script '{name}' not found") from e

 def validate_sampler_name(name):
     config = sd_samplers.all_samplers_map.get(name, None)

@@ -48,20 +47,23 @@ def validate_sampler_name(name):
     return name

+
 def setUpscalers(req: dict):
     reqDict = vars(req)
     reqDict['extras_upscaler_1'] = reqDict.pop('upscaler_1', None)
     reqDict['extras_upscaler_2'] = reqDict.pop('upscaler_2', None)
     return reqDict

+
 def decode_base64_to_image(encoding):
     if encoding.startswith("data:image/"):
         encoding = encoding.split(";")[1].split(",")[1]
     try:
         image = Image.open(BytesIO(base64.b64decode(encoding)))
         return image
-    except Exception as err:
-        raise HTTPException(status_code=500, detail="Invalid encoded image")
+    except Exception as e:
+        raise HTTPException(status_code=500, detail="Invalid encoded image") from e

+
 def encode_pil_to_base64(image):
     with io.BytesIO() as output_bytes:

@@ -76,6 +78,8 @@ def encode_pil_to_base64(image):
         image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality)

     elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"):
+        if image.mode == "RGBA":
+            image = image.convert("RGB")
         parameters = image.info.get('parameters', None)
         exif_bytes = piexif.dump({
             "Exif": { piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(parameters or "", encoding="unicode") }

@@ -92,6 +96,7 @@ def encode_pil_to_base64(image):
     return base64.b64encode(bytes_data)

+
 def api_middleware(app: FastAPI):
     rich_available = True
     try:

@@ -99,8 +104,7 @@ def api_middleware(app: FastAPI):
         import starlette # importing just so it can be placed on silent list
         from rich.console import Console
         console = Console()
-    except:
-        import traceback
+    except Exception:
         rich_available = False

     @app.middleware("http")
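
Note: decode_base64_to_image() above accepts either a bare base64 string or a data:image/... URL. Producing a compatible payload client-side takes a few lines with Pillow; a minimal sketch (the helper's name is ours, not part of the API):

import base64
import io

from PIL import Image


def encode_file_to_base64(path):
    # Yields the bare-base64 form that decode_base64_to_image() accepts.
    with Image.open(path) as im, io.BytesIO() as buf:
        im.save(buf, format="PNG")
        return base64.b64encode(buf.getvalue()).decode("utf-8")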
@@ -131,11 +135,12 @@ def api_middleware(app: FastAPI):
             "errors": str(e),
         }
         if not isinstance(e, HTTPException):  # do not print backtrace on known httpexceptions
-            print(f"API error: {request.method}: {request.url} {err}")
+            message = f"API error: {request.method}: {request.url} {err}"
             if rich_available:
+                print(message)
                 console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
             else:
-                traceback.print_exc()
+                errors.report(message, exc_info=True)
         return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))

     @app.middleware("http")

@@ -157,7 +162,7 @@ def api_middleware(app: FastAPI):
 class Api:
     def __init__(self, app: FastAPI, queue_lock: Lock):
         if shared.cmd_opts.api_auth:
-            self.credentials = dict()
+            self.credentials = {}
             for auth in shared.cmd_opts.api_auth.split(","):
                 user, password = auth.split(":")
                 self.credentials[user] = password

@@ -166,36 +171,44 @@ class Api:
         self.app = app
         self.queue_lock = queue_lock
         api_middleware(self.app)
-        self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
-        self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
-        self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
-        self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
-        self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
-        self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
+        self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse)
+        self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse)
+        self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse)
+        self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse)
+        self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse)
+        self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse)
         self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"])
         self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
         self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"])
-        self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel)
+        self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel)
         self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
-        self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel)
-        self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem])
-        self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem])
-        self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem])
-        self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem])
-        self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
-        self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
-        self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem])
-        self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse)
+        self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
+        self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem])
+        self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem])
+        self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=List[models.LatentUpscalerModeItem])
+        self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem])
+        self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=List[models.SDVaeItem])
+        self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem])
+        self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem])
+        self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem])
+        self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem])
+        self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
         self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
-        self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse)
-        self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse)
-        self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=PreprocessResponse)
-        self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse)
-        self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse)
-        self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse)
+        self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
+        self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
+        self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
+        self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
+        self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
+        self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
         self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
         self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
-        self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=ScriptsList)
+        self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList)
+        self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo])
+
+        if shared.cmd_opts.api_server_stop:
+            self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"])
+            self.add_api_route("/sdapi/v1/server-restart", self.restart_webui, methods=["POST"])
+            self.add_api_route("/sdapi/v1/server-stop", self.stop_webui, methods=["POST"])

         self.default_script_arg_txt2img = []
         self.default_script_arg_img2img = []

@@ -221,10 +234,18 @@ class Api:
         return script, script_idx

     def get_scripts_list(self):
-        t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles]
-        i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles]
+        t2ilist = [script.name for script in scripts.scripts_txt2img.scripts if script.name is not None]
+        i2ilist = [script.name for script in scripts.scripts_img2img.scripts if script.name is not None]

-        return ScriptsList(txt2img = t2ilist, img2img = i2ilist)
+        return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist)
+
+    def get_script_info(self):
+        res = []
+
+        for script_list in [scripts.scripts_txt2img.scripts, scripts.scripts_img2img.scripts]:
+            res += [script.api_info for script in script_list if script.api_info is not None]
+
+        return res

     def get_script(self, script_name, script_runner):
         if script_name is None or script_name == "":

@@ -261,14 +282,14 @@ class Api:
             script_args[0] = selectable_idx + 1

         # Now check for always on scripts
-        if request.alwayson_scripts and (len(request.alwayson_scripts) > 0):
+        if request.alwayson_scripts:
             for alwayson_script_name in request.alwayson_scripts.keys():
                 alwayson_script = self.get_script(alwayson_script_name, script_runner)
-                if alwayson_script == None:
+                if alwayson_script is None:
                     raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found")
                 # Selectable script in always on script param check
-                if alwayson_script.alwayson == False:
-                    raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts params")
+                if alwayson_script.alwayson is False:
+                    raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params")
                 # always on script with no arg should always run so you don't really need to add them to the requests
                 if "args" in request.alwayson_scripts[alwayson_script_name]:
                     # min between arg length in scriptrunner and arg length in the request

@@ -276,7 +297,7 @@ class Api:
                         script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx]
         return script_args

-    def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
+    def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
         script_runner = scripts.scripts_txt2img
         if not script_runner.scripts:
             script_runner.initialize_scripts(False)

@@ -304,13 +325,13 @@ class Api:
         args.pop('save_images', None)

         with self.queue_lock:
-            p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)
-            p.scripts = script_runner
-            p.outpath_grids = opts.outdir_txt2img_grids
-            p.outpath_samples = opts.outdir_txt2img_samples
-
-            shared.state.begin()
-            if selectable_scripts != None:
-                p.script_args = script_args
-                processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
-            else:
+            with closing(StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)) as p:
+                p.scripts = script_runner
+                p.outpath_grids = opts.outdir_txt2img_grids
+                p.outpath_samples = opts.outdir_txt2img_samples
+
+                shared.state.begin(job="scripts_txt2img")
+                if selectable_scripts is not None:
+                    p.script_args = script_args
+                    processed = scripts.scripts_txt2img.run(p, *p.script_args)  # Need to pass args as list here
+                else:
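
Note: wrapping the processing object in contextlib.closing() guarantees its close() method runs even when a script raises mid-generation, which is the point of this refactor. The mechanism in isolation, with an illustrative stand-in class:

from contextlib import closing


class Resource:
    def close(self):
        print("resources released")


with closing(Resource()) as r:
    pass  # close() runs on exit from the with-block, exception or not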
@@ -320,9 +341,9 @@ class Api:
         b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []

-        return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
+        return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())

-    def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI):
+    def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
         init_images = img2imgreq.init_images
         if init_images is None:
             raise HTTPException(status_code=404, detail="Init image not found")

@@ -360,14 +381,14 @@ class Api:
         args.pop('save_images', None)

         with self.queue_lock:
-            p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)
-            p.init_images = [decode_base64_to_image(x) for x in init_images]
-            p.scripts = script_runner
-            p.outpath_grids = opts.outdir_img2img_grids
-            p.outpath_samples = opts.outdir_img2img_samples
-
-            shared.state.begin()
-            if selectable_scripts != None:
-                p.script_args = script_args
-                processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
-            else:
+            with closing(StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)) as p:
+                p.init_images = [decode_base64_to_image(x) for x in init_images]
+                p.scripts = script_runner
+                p.outpath_grids = opts.outdir_img2img_grids
+                p.outpath_samples = opts.outdir_img2img_samples
+
+                shared.state.begin(job="scripts_img2img")
+                if selectable_scripts is not None:
+                    p.script_args = script_args
+                    processed = scripts.scripts_img2img.run(p, *p.script_args)  # Need to pass args as list here
+                else:

@@ -381,9 +402,9 @@ class Api:
             img2imgreq.init_images = None
             img2imgreq.mask = None

-        return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())
+        return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())

-    def extras_single_image_api(self, req: ExtrasSingleImageRequest):
+    def extras_single_image_api(self, req: models.ExtrasSingleImageRequest):
         reqDict = setUpscalers(req)

         reqDict['image'] = decode_base64_to_image(reqDict['image'])

@@ -391,9 +412,9 @@ class Api:
         with self.queue_lock:
             result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict)

-        return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])
+        return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])

-    def extras_batch_images_api(self, req: ExtrasBatchImagesRequest):
+    def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest):
         reqDict = setUpscalers(req)

         image_list = reqDict.pop('imageList', [])

@@ -402,15 +423,15 @@ class Api:
         with self.queue_lock:
             result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict)

-        return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
+        return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])

-    def pnginfoapi(self, req: PNGInfoRequest):
+    def pnginfoapi(self, req: models.PNGInfoRequest):
         if(not req.image.strip()):
-            return PNGInfoResponse(info="")
+            return models.PNGInfoResponse(info="")

         image = decode_base64_to_image(req.image.strip())
         if image is None:
-            return PNGInfoResponse(info="")
+            return models.PNGInfoResponse(info="")

         geninfo, items = images.read_info_from_image(image)
         if geninfo is None:

@@ -418,13 +439,13 @@ class Api:
         items = {**{'parameters': geninfo}, **items}

-        return PNGInfoResponse(info=geninfo, items=items)
+        return models.PNGInfoResponse(info=geninfo, items=items)

-    def progressapi(self, req: ProgressRequest = Depends()):
+    def progressapi(self, req: models.ProgressRequest = Depends()):
         # copy from check_progress_call of ui.py

         if shared.state.job_count == 0:
-            return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)
+            return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)

         # avoid dividing zero
         progress = 0.01

@@ -446,9 +467,9 @@ class Api:
         if shared.state.current_image and not req.skip_current_image:
             current_image = encode_pil_to_base64(shared.state.current_image)

-        return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)
+        return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)

-    def interrogateapi(self, interrogatereq: InterrogateRequest):
+    def interrogateapi(self, interrogatereq: models.InterrogateRequest):
         image_b64 = interrogatereq.image
         if image_b64 is None:
             raise HTTPException(status_code=404, detail="Image not found")

@@ -465,7 +486,7 @@ class Api:
             else:
                 raise HTTPException(status_code=404, detail="Model not found")

-        return InterrogateResponse(caption=processed)
+        return models.InterrogateResponse(caption=processed)

     def interruptapi(self):
         shared.state.interrupt()

@@ -497,6 +518,10 @@ class Api:
         return options

     def set_config(self, req: Dict[str, Any]):
+        checkpoint_name = req.get("sd_model_checkpoint", None)
+        if checkpoint_name is not None and checkpoint_name not in checkpoint_alisases:
+            raise RuntimeError(f"model {checkpoint_name!r} not found")
+
         for k, v in req.items():
             shared.opts.set(k, v)

@@ -521,9 +546,20 @@ class Api:
             for upscaler in shared.sd_upscalers
         ]

+    def get_latent_upscale_modes(self):
+        return [
+            {
+                "name": upscale_mode,
+            }
+            for upscale_mode in [*(shared.latent_upscale_modes or {})]
+        ]
+
     def get_sd_models(self):
         return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]

+    def get_sd_vaes(self):
+        return [{"model_name": x, "filename": vae_dict[x]} for x in vae_dict.keys()]
+
     def get_hypernetworks(self):
         return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
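
Note: the two new read-only routes surface data that previously required scraping the UI. A minimal sketch of querying them, assuming a local instance started with --api:

import requests

base = "http://127.0.0.1:7860"
modes = requests.get(f"{base}/sdapi/v1/latent-upscale-modes", timeout=10).json()
vaes = requests.get(f"{base}/sdapi/v1/sd-vae", timeout=10).json()
print([m["name"] for m in modes])       # names from shared.latent_upscale_modes
print([v["model_name"] for v in vaes])  # VAE files from vae_dict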
@@ -566,44 +602,42 @@ class Api:
     def create_embedding(self, args: dict):
         try:
-            shared.state.begin()
-            filename = create_embedding(**args) # create empty embedding
-            sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used
-            shared.state.end()
-            return CreateResponse(info=f"create embedding filename: {filename}")
+            shared.state.begin(job="create_embedding")
+            filename = create_embedding(**args) # create empty embedding
+            sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used
+            return models.CreateResponse(info=f"create embedding filename: {filename}")
         except AssertionError as e:
-            shared.state.end()
-            return TrainResponse(info=f"create embedding error: {e}")
+            return models.TrainResponse(info=f"create embedding error: {e}")
+        finally:
+            shared.state.end()

     def create_hypernetwork(self, args: dict):
         try:
-            shared.state.begin()
-            filename = create_hypernetwork(**args) # create empty embedding
-            shared.state.end()
-            return CreateResponse(info=f"create hypernetwork filename: {filename}")
+            shared.state.begin(job="create_hypernetwork")
+            filename = create_hypernetwork(**args) # create empty embedding
+            return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
         except AssertionError as e:
-            shared.state.end()
-            return TrainResponse(info=f"create hypernetwork error: {e}")
+            return models.TrainResponse(info=f"create hypernetwork error: {e}")
+        finally:
+            shared.state.end()

     def preprocess(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="preprocess")
             preprocess(**args) # quick operation unless blip/booru interrogation is enabled
             shared.state.end()
-            return PreprocessResponse(info = 'preprocess complete')
+            return models.PreprocessResponse(info='preprocess complete')
         except KeyError as e:
-            shared.state.end()
-            return PreprocessResponse(info=f"preprocess error: invalid token: {e}")
-        except AssertionError as e:
-            shared.state.end()
-            return PreprocessResponse(info=f"preprocess error: {e}")
-        except FileNotFoundError as e:
-            shared.state.end()
-            return PreprocessResponse(info=f'preprocess error: {e}')
+            return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
+        except Exception as e:
+            return models.PreprocessResponse(info=f"preprocess error: {e}")
+        finally:
+            shared.state.end()

     def train_embedding(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="train_embedding")
             apply_optimizations = shared.opts.training_xattention_optimizations
             error = None
             filename = ''

@@ -616,15 +650,15 @@ class Api:
             finally:
                 if not apply_optimizations:
                     sd_hijack.apply_optimizations()
-            shared.state.end()
-            return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
-        except AssertionError as msg:
-            shared.state.end()
-            return TrainResponse(info=f"train embedding error: {msg}")
+            return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
+        except Exception as msg:
+            return models.TrainResponse(info=f"train embedding error: {msg}")
+        finally:
+            shared.state.end()

     def train_hypernetwork(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="train_hypernetwork")
             shared.loaded_hypernetworks = []
             apply_optimizations = shared.opts.training_xattention_optimizations
             error = None

@@ -641,14 +675,16 @@ class Api:
                 if not apply_optimizations:
                     sd_hijack.apply_optimizations()
                 shared.state.end()
-            return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
-        except AssertionError as msg:
-            shared.state.end()
-            return TrainResponse(info=f"train embedding error: {error}")
+            return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
+        except Exception as exc:
+            return models.TrainResponse(info=f"train embedding error: {exc}")
+        finally:
+            shared.state.end()

     def get_memory(self):
         try:
-            import os, psutil
+            import os
+            import psutil
             process = psutil.Process(os.getpid())
             res = process.memory_info() # only rss is cross-platform guaranteed so we dont rely on other values
             ram_total = 100 * res.rss / process.memory_percent() # and total memory is calculated as actual value is not cross-platform safe

@@ -675,11 +711,23 @@ class Api:
                     'events': warnings,
                 }
             else:
-                cuda = { 'error': 'unavailable' }
+                cuda = {'error': 'unavailable'}
         except Exception as err:
-            cuda = { 'error': f'{err}' }
-        return MemoryResponse(ram = ram, cuda = cuda)
+            cuda = {'error': f'{err}'}
+        return models.MemoryResponse(ram=ram, cuda=cuda)

     def launch(self, server_name, port):
         self.app.include_router(self.router)
-        uvicorn.run(self.app, host=server_name, port=port)
+        uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=0)
+
+    def kill_webui(self):
+        restart.stop_program()
+
+    def restart_webui(self):
+        if restart.is_restartable():
+            restart.restart_program()
+        return Response(status_code=501)
+
+    def stop_webui(request):
+        shared.state.server_command = "stop"
+        return Response("Stopping.")
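
Note: the server-control routes above are only registered when the webui is started with --api-server-stop. A minimal sketch of driving them, with an ordinary txt2img call for contrast; the host, port, and prompt are illustrative:

import requests

base = "http://127.0.0.1:7860"

# Regular generation through the txt2img route registered above.
payload = {"prompt": "a lighthouse at dawn", "steps": 20}
images = requests.post(f"{base}/sdapi/v1/txt2img", json=payload, timeout=600).json()["images"]

# Graceful restart; returns 501 if the server was not launched restartably.
requests.post(f"{base}/sdapi/v1/server-restart")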

modules/api/models.py
@@ -223,8 +223,9 @@ for key in _options:
     if(_options[key].dest != 'help'):
         flag = _options[key]
         _type = str
-        if _options[key].default is not None: _type = type(_options[key].default)
-        flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))})
+        if _options[key].default is not None:
+            _type = type(_options[key].default)
+        flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))})

 FlagsModel = create_model("Flags", **flags)

@@ -240,6 +241,9 @@ class UpscalerItem(BaseModel):
     model_url: Optional[str] = Field(title="URL")
     scale: Optional[float] = Field(title="Scale")

+class LatentUpscalerModeItem(BaseModel):
+    name: str = Field(title="Name")
+
 class SDModelItem(BaseModel):
     title: str = Field(title="Title")
     model_name: str = Field(title="Model Name")

@@ -248,6 +252,10 @@ class SDModelItem(BaseModel):
     filename: str = Field(title="Filename")
     config: Optional[str] = Field(title="Config file")

+class SDVaeItem(BaseModel):
+    model_name: str = Field(title="Model Name")
+    filename: str = Field(title="Filename")
+
 class HypernetworkItem(BaseModel):
     name: str = Field(title="Name")
     path: Optional[str] = Field(title="Path")

@@ -266,10 +274,6 @@ class PromptStyleItem(BaseModel):
     prompt: Optional[str] = Field(title="Prompt")
     negative_prompt: Optional[str] = Field(title="Negative Prompt")

-class ArtistItem(BaseModel):
-    name: str = Field(title="Name")
-    score: float = Field(title="Score")
-    category: str = Field(title="Category")

 class EmbeddingItem(BaseModel):
     step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available")

@@ -286,6 +290,23 @@ class MemoryResponse(BaseModel):
     ram: dict = Field(title="RAM", description="System memory stats")
     cuda: dict = Field(title="CUDA", description="nVidia CUDA memory stats")

 class ScriptsList(BaseModel):
-    txt2img: list = Field(default=None,title="Txt2img", description="Titles of scripts (txt2img)")
-    img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)")
+    txt2img: list = Field(default=None, title="Txt2img", description="Titles of scripts (txt2img)")
+    img2img: list = Field(default=None, title="Img2img", description="Titles of scripts (img2img)")
+
+class ScriptArg(BaseModel):
+    label: str = Field(default=None, title="Label", description="Name of the argument in UI")
+    value: Optional[Any] = Field(default=None, title="Value", description="Default value of the argument")
+    minimum: Optional[Any] = Field(default=None, title="Minimum", description="Minimum allowed value for the argument in UI")
+    maximum: Optional[Any] = Field(default=None, title="Maximum", description="Maximum allowed value for the argument in UI")
+    step: Optional[Any] = Field(default=None, title="Step", description="Step for changing value of the argument in UI")
+    choices: Optional[List[str]] = Field(default=None, title="Choices", description="Possible values for the argument")
+
+class ScriptInfo(BaseModel):
+    name: str = Field(default=None, title="Name", description="Script name")
+    is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script")
+    is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script")
+    args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments")

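For context, the new Script* models use the same pydantic v1 Field style as the rest of the API schema. A minimal self-contained sketch (trimmed to a subset of fields; the script name and values are illustrative, not from the commit):

from typing import Any, List, Optional
from pydantic import BaseModel, Field

class ScriptArg(BaseModel):
    label: str = Field(default=None, title="Label", description="Name of the argument in UI")
    value: Optional[Any] = Field(default=None, title="Value", description="Default value of the argument")
    choices: Optional[List[str]] = Field(default=None, title="Choices", description="Possible values for the argument")

class ScriptInfo(BaseModel):
    name: str = Field(default=None, title="Name", description="Script name")
    is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Whether this is an alwayson script")
    args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments")

info = ScriptInfo(name="example script", is_alwayson=False, args=[ScriptArg(label="Steps", value=20)])
print(info.json())  # serializes the nested models to JSON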
@@ -1,10 +1,9 @@
+from functools import wraps
 import html
-import sys
 import threading
-import traceback
 import time

-from modules import shared, progress
+from modules import shared, progress, errors

 queue_lock = threading.Lock()

@@ -20,17 +19,18 @@ def wrap_queued_call(func):

 def wrap_gradio_gpu_call(func, extra_outputs=None):
+    @wraps(func)
     def f(*args, **kwargs):

         # if the first argument is a string that says "task(...)", it is treated as a job id
-        if len(args) > 0 and type(args[0]) == str and args[0][0:5] == "task(" and args[0][-1] == ")":
+        if args and type(args[0]) == str and args[0].startswith("task(") and args[0].endswith(")"):
             id_task = args[0]
             progress.add_task_to_queue(id_task)
         else:
             id_task = None

         with queue_lock:
-            shared.state.begin()
+            shared.state.begin(job=id_task)
             progress.start_task(id_task)

             try:

@@ -47,6 +47,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):

 def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
+    @wraps(func)
     def f(*args, extra_outputs_array=extra_outputs, **kwargs):
         run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats
         if run_memmon:

@@ -56,16 +57,14 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
         try:
             res = list(func(*args, **kwargs))
         except Exception as e:
-            # When printing out our debug argument list, do not print out more than a MB of text
-            max_debug_str_len = 131072 # (1024*1024)/8
+            # When printing out our debug argument list,
+            # do not print out more than 100 KB of text
+            max_debug_str_len = 131072

-            print("Error completing request", file=sys.stderr)
-            argStr = f"Arguments: {args} {kwargs}"
-            print(argStr[:max_debug_str_len], file=sys.stderr)
-            if len(argStr) > max_debug_str_len:
-                print(f"(Argument list truncated at {max_debug_str_len}/{len(argStr)} characters)", file=sys.stderr)
-            print(traceback.format_exc(), file=sys.stderr)
+            message = "Error completing request"
+            arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
+            if len(arg_str) > max_debug_str_len:
+                arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
+            errors.report(f"{message}\n{arg_str}", exc_info=True)

         shared.state.job = ""
         shared.state.job_count = 0

@@ -108,4 +107,3 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
         return tuple(res)

     return f
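The @wraps(func) additions matter because anything that introspects the queued callables otherwise sees every function as "f". A minimal sketch of the pattern (the generate function is a hypothetical stand-in, not from the commit):

import threading
from functools import wraps

queue_lock = threading.Lock()

def wrap_queued_call(func):
    @wraps(func)  # preserves func.__name__ / __doc__ on the wrapper
    def f(*args, **kwargs):
        with queue_lock:  # serialize all wrapped calls through one lock
            return func(*args, **kwargs)
    return f

@wrap_queued_call
def generate(prompt):
    return f"result for {prompt!r}"

print(generate("a cat"))  # result for 'a cat'
print(generate.__name__)  # 'generate', not 'f', thanks to @wraps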
@@ -1,6 +1,7 @@
 import argparse
+import json
 import os
-from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file
+from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file  # noqa: F401

 parser = argparse.ArgumentParser()

@@ -10,9 +11,9 @@ parser.add_argument("--skip-python-version-check", action='store_true', help="la
 parser.add_argument("--skip-torch-cuda-test", action='store_true', help="launch.py argument: do not check if CUDA is able to work properly")
 parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py argument: install the appropriate version of xformers even if you have some version already installed")
 parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
-parser.add_argument("--update-check", action='store_true', help="launch.py argument: chck for updates at startup")
+parser.add_argument("--update-check", action='store_true', help="launch.py argument: check for updates at startup")
-parser.add_argument("--tests", type=str, default=None, help="launch.py argument: run tests in the specified directory")
+parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
-parser.add_argument("--no-tests", action='store_true', help="launch.py argument: do not run tests even if --tests option is specified")
+parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
 parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
 parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
 parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)

@@ -39,7 +40,8 @@ parser.add_argument("--precision", type=str, help="evaluate at this precision",
 parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
 parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
 parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
-parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
+parser.add_argument("--ngrok-region", type=str, help="does not do anything.", default="")
+parser.add_argument("--ngrok-options", type=json.loads, help='The options to pass to ngrok in JSON format, e.g.: \'{"authtoken_from_env":true, "basic_auth":"user:password", "oauth_provider":"google", "oauth_allow_emails":"user@asdf.com"}\'', default=dict())
 parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
 parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
 parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))

@@ -51,16 +53,16 @@ parser.add_argument("--xformers", action='store_true', help="enable xformers for
 parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
 parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
 parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything")
-parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
+parser.add_argument("--opt-split-attention", action='store_true', help="prefer Doggettx's cross-attention layer optimization for automatic choice of optimization")
-parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization")
+parser.add_argument("--opt-sub-quad-attention", action='store_true', help="prefer memory efficient sub-quadratic cross-attention layer optimization for automatic choice of optimization")
 parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
 parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
 parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
-parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
+parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="prefer InvokeAI's cross-attention layer optimization for automatic choice of optimization")
-parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
+parser.add_argument("--opt-split-attention-v1", action='store_true', help="prefer older version of split attention optimization for automatic choice of optimization")
-parser.add_argument("--opt-sdp-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization; requires PyTorch 2.*")
+parser.add_argument("--opt-sdp-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*")
-parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization without memory efficient attention, makes image generation deterministic; requires PyTorch 2.*")
+parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*")
-parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
+parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
 parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
 parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")

@@ -75,6 +77,7 @@ parser.add_argument("--gradio-auth", type=str, help='set gradio authentication l
 parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
 parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
 parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
+parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it")
 parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
 parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
 parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)

@@ -103,3 +106,4 @@ parser.add_argument("--skip-version-check", action='store_true', help="Do not ch
 parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False)
 parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False)
 parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy')
+parser.add_argument('--api-server-stop', action='store_true', help='enable server stop/restart/kill via api')
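Because --ngrok-options is declared with type=json.loads, argparse hands the application a ready-made dict. A quick illustration (the option values are examples only):

import argparse
import json

parser = argparse.ArgumentParser()
# argparse runs json.loads on the raw string before storing the value
parser.add_argument("--ngrok-options", type=json.loads, default=dict())

args = parser.parse_args(['--ngrok-options', '{"authtoken_from_env": true, "basic_auth": "user:password"}'])
print(args.ngrok_options["basic_auth"])  # -> user:password
print(type(args.ngrok_options))          # -> <class 'dict'>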
@@ -1,14 +1,12 @@
 # this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py
 import math
-import numpy as np
 import torch
 from torch import nn, Tensor
 import torch.nn.functional as F
-from typing import Optional, List
+from typing import Optional

-from modules.codeformer.vqgan_arch import *
-from basicsr.utils import get_root_logger
+from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock
 from basicsr.utils.registry import ARCH_REGISTRY

 def calc_mean_std(feat, eps=1e-5):

@@ -163,8 +161,8 @@ class Fuse_sft_block(nn.Module):
 class CodeFormer(VQAutoEncoder):
     def __init__(self, dim_embd=512, n_head=8, n_layers=9,
                  codebook_size=1024, latent_size=256,
-                 connect_list=['32', '64', '128', '256'],
-                 fix_modules=['quantize','generator']):
+                 connect_list=('32', '64', '128', '256'),
+                 fix_modules=('quantize', 'generator')):
         super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size)

         if fix_modules is not None:
@@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut
 https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
 '''
-import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-import copy
 from basicsr.utils import get_root_logger
 from basicsr.utils.registry import ARCH_REGISTRY

@@ -328,7 +326,7 @@ class Generator(nn.Module):
 @ARCH_REGISTRY.register()
 class VQAutoEncoder(nn.Module):
-    def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=[16], codebook_size=1024, emb_dim=256,
+    def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256,
                 beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None):
         super().__init__()
         logger = get_root_logger()

@@ -339,7 +337,7 @@ class VQAutoEncoder(nn.Module):
         self.embed_dim = emb_dim
         self.ch_mult = ch_mult
         self.resolution = img_size
-        self.attn_resolutions = attn_resolutions
+        self.attn_resolutions = attn_resolutions or [16]
         self.quantizer_type = quantizer
         self.encoder = Encoder(
             self.in_channels,
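The attn_resolutions=None / "or [16]" change (like the tuple defaults in the previous hunk) is the standard fix for Python's shared mutable default arguments. A self-contained illustration:

# A mutable default is created once and shared by every call:
def bad(resolutions=[16]):
    resolutions.append(0)
    return resolutions

# The None sentinel gives each call a fresh list instead:
def good(resolutions=None):
    resolutions = resolutions or [16]
    resolutions.append(0)
    return resolutions

print(bad())   # [16, 0]
print(bad())   # [16, 0, 0]  <- state leaked from the previous call
print(good())  # [16, 0]
print(good())  # [16, 0]     <- fresh list every call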
@@ -1,13 +1,11 @@
 import os
-import sys
-import traceback

 import cv2
 import torch

 import modules.face_restoration
 import modules.shared
-from modules import shared, devices, modelloader
+from modules import shared, devices, modelloader, errors
 from modules.paths import models_path

 # codeformer people made a choice to include modified basicsr library to their project which makes

@@ -17,14 +15,11 @@ model_dir = "Codeformer"
 model_path = os.path.join(models_path, model_dir)
 model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'

-have_codeformer = False
 codeformer = None

 def setup_model(dirname):
-    global model_path
-    if not os.path.exists(model_path):
-        os.makedirs(model_path)
+    os.makedirs(model_path, exist_ok=True)

     path = modules.paths.paths.get("CodeFormer", None)
     if path is None:

@@ -33,11 +28,9 @@ def setup_model(dirname):
     try:
         from torchvision.transforms.functional import normalize
         from modules.codeformer.codeformer_arch import CodeFormer
-        from basicsr.utils.download_util import load_file_from_url
-        from basicsr.utils import imwrite, img2tensor, tensor2img
+        from basicsr.utils import img2tensor, tensor2img
         from facelib.utils.face_restoration_helper import FaceRestoreHelper
         from facelib.detection.retinaface import retinaface
-        from modules.shared import cmd_opts

         net_class = CodeFormer

@@ -96,7 +89,7 @@ def setup_model(dirname):
                 self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
                 self.face_helper.align_warp_face()

-                for idx, cropped_face in enumerate(self.face_helper.cropped_faces):
+                for cropped_face in self.face_helper.cropped_faces:
                     cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
                     normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
                     cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer)

@@ -107,8 +100,8 @@ def setup_model(dirname):
                         restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
                         del output
                         torch.cuda.empty_cache()
-                    except Exception as error:
-                        print(f'\tFailed inference for CodeFormer: {error}', file=sys.stderr)
+                    except Exception:
+                        errors.report('Failed inference for CodeFormer', exc_info=True)
                         restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))

                     restored_face = restored_face.astype('uint8')

@@ -129,15 +122,11 @@ def setup_model(dirname):
                 return restored_img

-        global have_codeformer
-        have_codeformer = True
-
         global codeformer
         codeformer = FaceRestorerCodeFormer(dirname)
         shared.face_restorers.append(codeformer)

     except Exception:
-        print("Error setting up CodeFormer:", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
+        errors.report("Error setting up CodeFormer", exc_info=True)

     # sys.path = stored_sys_path
@@ -3,8 +3,6 @@ Supports saving and restoring webui and extensions from a known working set of c
 """
 import os
-import sys
-import traceback
 import json
 import time
 import tqdm

@@ -13,8 +11,8 @@ from datetime import datetime
 from collections import OrderedDict
 import git

-from modules import shared, extensions
-from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir
+from modules import shared, extensions, errors
+from modules.paths_internal import script_path, config_states_dir

 all_config_states = OrderedDict()

@@ -35,7 +33,7 @@ def list_config_states():
             j["filepath"] = path
             config_states.append(j)

-    config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True))
+    config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)

     for cs in config_states:
         timestamp = time.asctime(time.gmtime(cs["created_at"]))

@@ -53,8 +51,7 @@ def get_webui_config():
         if os.path.exists(os.path.join(script_path, ".git")):
             webui_repo = git.Repo(script_path)
     except Exception:
-        print(f"Error reading webui git info from {script_path}:", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
+        errors.report(f"Error reading webui git info from {script_path}", exc_info=True)

     webui_remote = None
     webui_commit_hash = None

@@ -83,6 +80,8 @@ def get_extension_config():
     ext_config = {}

     for ext in extensions.extensions:
+        ext.read_info_from_repo()
+
         entry = {
             "name": ext.name,
             "path": ext.path,

@@ -132,8 +131,7 @@ def restore_webui_config(config):
         if os.path.exists(os.path.join(script_path, ".git")):
             webui_repo = git.Repo(script_path)
     except Exception:
-        print(f"Error reading webui git info from {script_path}:", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
+        errors.report(f"Error reading webui git info from {script_path}", exc_info=True)
         return

     try:

@@ -141,8 +139,7 @@ def restore_webui_config(config):
         webui_repo.git.reset(webui_commit_hash, hard=True)
         print(f"* Restored webui to commit {webui_commit_hash}.")
     except Exception:
-        print(f"Error restoring webui to commit {webui_commit_hash}:", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
+        errors.report(f"Error restoring webui to commit {webui_commit_hash}")
@@ -2,7 +2,6 @@ import os
 import re

 import torch
-from PIL import Image
 import numpy as np

 from modules import modelloader, paths, deepbooru_model, devices, images, shared

@@ -79,7 +78,7 @@ class DeepDanbooru:

         res = []

-        filtertags = set([x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")])
+        filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")}

         for tag in [x for x in tags if x not in filtertags]:
             probability = probability_dict[tag]
@@ -1,5 +1,7 @@
 import sys
 import contextlib
+from functools import lru_cache
+
 import torch
 from modules import errors

@@ -13,13 +15,6 @@ def has_mps() -> bool:
     else:
         return mac_specific.has_mps

-def extract_device_id(args, name):
-    for x in range(len(args)):
-        if name in args[x]:
-            return args[x + 1]
-
-    return None
-
 def get_cuda_device_string():
     from modules import shared

@@ -65,7 +60,7 @@ def enable_tf32():
         # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
         # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
-        if any([torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())]):
+        if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
             torch.backends.cudnn.benchmark = True

         torch.backends.cuda.matmul.allow_tf32 = True

@@ -154,3 +149,19 @@ def test_for_nans(x, where):
     message += " Use --disable-nan-check commandline argument to disable this check."

     raise NansException(message)
+
+@lru_cache
+def first_time_calculation():
+    """
+    just do any calculation with pytorch layers - the first time this is done it allocates about 700MB of memory and
+    spends about 2.7 seconds doing that, at least with NVidia.
+    """
+
+    x = torch.zeros((1, 1)).to(device, dtype)
+    linear = torch.nn.Linear(1, 1).to(device, dtype)
+    linear(x)
+
+    x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
+    conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
+    conv2d(x)
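@lru_cache on a zero-argument function makes it effectively run-once, which is all the warmup above needs. A tiny sketch of the pattern:

from functools import lru_cache

@lru_cache
def first_time_calculation():
    # the first call does the expensive work; subsequent calls return
    # the cached result (None) without re-running the body
    print("allocating buffers / warming up kernels")

first_time_calculation()  # prints once
first_time_calculation()  # cached - prints nothing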
@@ -1,8 +1,42 @@
 import sys
+import textwrap
 import traceback

+exception_records = []
+
+def record_exception():
+    _, e, tb = sys.exc_info()
+    if e is None:
+        return
+
+    if exception_records and exception_records[-1] == e:
+        return
+
+    exception_records.append((e, tb))
+
+    if len(exception_records) > 5:
+        exception_records.pop(0)
+
+def report(message: str, *, exc_info: bool = False) -> None:
+    """
+    Print an error message to stderr, with optional traceback.
+    """
+
+    record_exception()
+
+    for line in message.splitlines():
+        print("***", line, file=sys.stderr)
+    if exc_info:
+        print(textwrap.indent(traceback.format_exc(), "    "), file=sys.stderr)
+        print("---", file=sys.stderr)
+
 def print_error_explanation(message):
+    record_exception()
+
     lines = message.strip().split("\n")
     max_len = max([len(x) for x in lines])

@@ -12,9 +46,15 @@ def print_error_explanation(message):
     print('=' * max_len, file=sys.stderr)

-def display(e: Exception, task):
+def display(e: Exception, task, *, full_traceback=False):
+    record_exception()
+
     print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr)
-    print(traceback.format_exc(), file=sys.stderr)
+    te = traceback.TracebackException.from_exception(e)
+    if full_traceback:
+        # include frames leading up to the try-catch block
+        te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + te.stack)
+    print(*te.format(), sep="", file=sys.stderr)

     message = str(e)
     if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message:

@@ -28,6 +68,8 @@ already_displayed = {}

 def display_once(e: Exception, task):
+    record_exception()
+
     if task in already_displayed:
         return
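The output contract of the new report helper, runnable in isolation (same formatting as the diff above): each message line is prefixed with "***", and the traceback, when requested, is indented underneath and closed with "---".

import sys
import textwrap
import traceback

def report(message: str, *, exc_info: bool = False) -> None:
    for line in message.splitlines():
        print("***", line, file=sys.stderr)
    if exc_info:
        print(textwrap.indent(traceback.format_exc(), "    "), file=sys.stderr)
        print("---", file=sys.stderr)

try:
    1 / 0
except Exception:
    report("Error completing request\nArguments: ()", exc_info=True)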
@@ -1,24 +1,20 @@
-import os
+import sys

 import numpy as np
 import torch
 from PIL import Image
-from basicsr.utils.download_util import load_file_from_url

 import modules.esrgan_model_arch as arch
-from modules import shared, modelloader, images, devices
-from modules.upscaler import Upscaler, UpscalerData
+from modules import modelloader, images, devices
 from modules.shared import opts
+from modules.upscaler import Upscaler, UpscalerData

 def mod2normal(state_dict):
     # this code is copied from https://github.com/victorca25/iNNfer
     if 'conv_first.weight' in state_dict:
         crt_net = {}
-        items = []
-        for k, v in state_dict.items():
-            items.append(k)
+        items = list(state_dict)

         crt_net['model.0.weight'] = state_dict['conv_first.weight']
         crt_net['model.0.bias'] = state_dict['conv_first.bias']

@@ -52,9 +48,7 @@ def resrgan2normal(state_dict, nb=23):
     if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
         re8x = 0
         crt_net = {}
-        items = []
-        for k, v in state_dict.items():
-            items.append(k)
+        items = list(state_dict)

         crt_net['model.0.weight'] = state_dict['conv_first.weight']
         crt_net['model.0.bias'] = state_dict['conv_first.bias']

@@ -138,7 +132,7 @@ class UpscalerESRGAN(Upscaler):
             scaler_data = UpscalerData(self.model_name, self.model_url, self, 4)
             scalers.append(scaler_data)
         for file in model_paths:
-            if "http" in file:
+            if file.startswith("http"):
                 name = self.model_name
             else:
                 name = modelloader.friendly_name(file)

@@ -147,26 +141,25 @@ class UpscalerESRGAN(Upscaler):
             self.scalers.append(scaler_data)

     def do_upscale(self, img, selected_model):
-        model = self.load_model(selected_model)
-        if model is None:
+        try:
+            model = self.load_model(selected_model)
+        except Exception as e:
+            print(f"Unable to load ESRGAN model {selected_model}: {e}", file=sys.stderr)
             return img
         model.to(devices.device_esrgan)
         img = esrgan_upscale(model, img)
         return img

     def load_model(self, path: str):
-        if "http" in path:
-            filename = load_file_from_url(
+        if path.startswith("http"):
+            # TODO: this doesn't use `path` at all?
+            filename = modelloader.load_file_from_url(
                 url=self.model_url,
-                model_dir=self.model_path,
+                model_dir=self.model_download_path,
                 file_name=f"{self.model_name}.pth",
-                progress=True,
             )
         else:
             filename = path
-        if not os.path.exists(filename) or filename is None:
-            print(f"Unable to load {self.model_path} from {filename}")
-            return None

         state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None)
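The repeated change from "http" in x to x.startswith("http") throughout this commit closes a substring false positive; a two-line demonstration (the path is a made-up example):

# "http" in path matches anywhere in the string, so a local folder that
# merely contains "http" in its name was treated as a URL:
path = "/models/http_upscalers/4x.pth"
print("http" in path)           # True  - false positive
print(path.startswith("http"))  # False - correctly treated as a file path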
@@ -2,7 +2,6 @@
 from collections import OrderedDict
 import math
-import functools
 import torch
 import torch.nn as nn
 import torch.nn.functional as F

@@ -438,9 +437,11 @@ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=
     padding = padding if pad_type == 'zero' else 0

     if convtype=='PartialConv2D':
+        from torchvision.ops import PartialConv2d  # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer
         c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
                        dilation=dilation, bias=bias, groups=groups)
     elif convtype=='DeformConv2D':
+        from torchvision.ops import DeformConv2d  # not tested
         c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
                        dilation=dilation, bias=bias, groups=groups)
     elif convtype=='Conv3D':
@@ -1,18 +1,13 @@
 import os
-import sys
-import traceback
-import time
-from datetime import datetime
+import threading

-import git
-
-from modules import shared
-from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path
+from modules import shared, errors
+from modules.gitpython_hack import Repo
+from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path  # noqa: F401

 extensions = []

-if not os.path.exists(extensions_dir):
-    os.makedirs(extensions_dir)
+os.makedirs(extensions_dir, exist_ok=True)

 def active():

@@ -25,6 +20,8 @@ def active():

 class Extension:
+    lock = threading.Lock()
+
     def __init__(self, name, path, enabled=True, is_builtin=False):
         self.name = name
         self.path = path

@@ -43,15 +40,19 @@ class Extension:
         if self.is_builtin or self.have_info_from_repo:
             return

-        self.have_info_from_repo = True
+        with self.lock:
+            if self.have_info_from_repo:
+                return
+
+            self.do_read_info_from_repo()
+
+    def do_read_info_from_repo(self):
         repo = None
         try:
             if os.path.exists(os.path.join(self.path, ".git")):
-                repo = git.Repo(self.path)
+                repo = Repo(self.path)
         except Exception:
-            print(f"Error reading github repository info from {self.path}:", file=sys.stderr)
-            print(traceback.format_exc(), file=sys.stderr)
+            errors.report(f"Error reading github repository info from {self.path}", exc_info=True)

         if repo is None or repo.bare:
             self.remote = None

@@ -59,18 +60,19 @@ class Extension:
             try:
                 self.status = 'unknown'
                 self.remote = next(repo.remote().urls, None)
-                head = repo.head.commit
-                self.commit_date = repo.head.commit.committed_date
-                ts = time.asctime(time.gmtime(self.commit_date))
+                commit = repo.head.commit
+                self.commit_date = commit.committed_date
                 if repo.active_branch:
                     self.branch = repo.active_branch.name
-                self.commit_hash = head.hexsha
-                self.version = f'{self.commit_hash[:8]} ({ts})'
-            except Exception as ex:
-                print(f"Failed reading extension data from Git repository ({self.name}): {ex}", file=sys.stderr)
+                self.commit_hash = commit.hexsha
+                self.version = self.commit_hash[:8]
+            except Exception:
+                errors.report(f"Failed reading extension data from Git repository ({self.name})", exc_info=True)
                 self.remote = None

+        self.have_info_from_repo = True
+
     def list_files(self, subdir, extension):
         from modules import scripts

@@ -87,7 +89,7 @@ class Extension:
         return res

     def check_updates(self):
-        repo = git.Repo(self.path)
+        repo = Repo(self.path)
         for fetch in repo.remote().fetch(dry_run=True):
             if fetch.flags != fetch.HEAD_UPTODATE:
                 self.can_update = True

@@ -109,7 +111,7 @@ class Extension:
         self.status = "latest"

     def fetch_and_reset_hard(self, commit='origin'):
-        repo = git.Repo(self.path)
+        repo = Repo(self.path)
         # Fix: `error: Your local changes to the following files would be overwritten by merge`,
         # because WSL2 Docker set 755 file permissions instead of 644, this results to the error.
         repo.git.fetch(all=True)
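The read_info_from_repo rewrite above is double-checked locking: a cheap unsynchronized test, then a re-test under the class-level lock so concurrent callers cannot both run the expensive git query. Reduced to its skeleton (the body is a placeholder):

import threading

class Extension:
    lock = threading.Lock()

    def __init__(self):
        self.have_info_from_repo = False

    def read_info_from_repo(self):
        if self.have_info_from_repo:  # fast path, no lock taken
            return

        with self.lock:
            if self.have_info_from_repo:  # re-check: another thread may have won
                return
            self.do_read_info_from_repo()

    def do_read_info_from_repo(self):
        # stands in for the git query; runs at most once per instance
        self.have_info_from_repo = True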
@@ -14,9 +14,26 @@ def register_extra_network(extra_network):
     extra_network_registry[extra_network.name] = extra_network

+def register_default_extra_networks():
+    from modules.extra_networks_hypernet import ExtraNetworkHypernet
+    register_extra_network(ExtraNetworkHypernet())
+
 class ExtraNetworkParams:
     def __init__(self, items=None):
         self.items = items or []
+        self.positional = []
+        self.named = {}
+
+        for item in self.items:
+            parts = item.split('=', 2) if isinstance(item, str) else [item]
+            if len(parts) == 2:
+                self.named[parts[0]] = parts[1]
+            else:
+                self.positional.append(item)
+
+    def __eq__(self, other):
+        return self.items == other.items
+
 class ExtraNetwork:

@@ -86,12 +103,15 @@ def activate(p, extra_network_data):
         except Exception as e:
             errors.display(e, f"activating extra network {extra_network_name}")

+    if p.scripts is not None:
+        p.scripts.after_extra_networks_activate(p, batch_number=p.iteration, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds, extra_network_data=extra_network_data)
+
 def deactivate(p, extra_network_data):
     """call deactivate for extra networks in extra_network_data in specified order, then call
     deactivate for all remaining registered networks"""

-    for extra_network_name, extra_network_args in extra_network_data.items():
+    for extra_network_name in extra_network_data:
         extra_network = extra_network_registry.get(extra_network_name, None)
         if extra_network is None:
             continue
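The new ExtraNetworkParams fields split prompt arguments like those in <hypernet:name:0.8>-style tags into positional and named parts. The parsing, runnable on its own (the example values are illustrative):

class ExtraNetworkParams:
    def __init__(self, items=None):
        self.items = items or []
        self.positional = []
        self.named = {}

        for item in self.items:
            # "key=value" items become named arguments, everything else positional
            parts = item.split('=', 2) if isinstance(item, str) else [item]
            if len(parts) == 2:
                self.named[parts[0]] = parts[1]
            else:
                self.positional.append(item)

params = ExtraNetworkParams(["mynet", "0.8", "te=0.5"])
print(params.positional)  # ['mynet', '0.8']
print(params.named)       # {'te': '0.5'}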
@@ -1,4 +1,4 @@
-from modules import extra_networks, shared, extra_networks
+from modules import extra_networks, shared
 from modules.hypernetworks import hypernetwork

@@ -9,7 +9,7 @@ class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
     def activate(self, p, params_list):
         additional = shared.opts.sd_hypernetwork

-        if additional != "None" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0:
+        if additional != "None" and additional in shared.hypernetworks and not any(x for x in params_list if x.items[0] == additional):
             hypernet_prompt_text = f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>"
             p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts]
             params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))

@@ -17,7 +17,7 @@ class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
         names = []
         multipliers = []
         for params in params_list:
-            assert len(params.items) > 0
+            assert params.items

             names.append(params.items[0])
             multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
@@ -73,8 +73,7 @@ def to_half(tensor, enable):

 def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata):
-    shared.state.begin()
-    shared.state.job = 'model-merge'
+    shared.state.begin(job="model-merge")

     def fail(message):
         shared.state.textinfo = message

@@ -136,14 +135,14 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
     result_is_instruct_pix2pix_model = False

     if theta_func2:
-        shared.state.textinfo = f"Loading B"
+        shared.state.textinfo = "Loading B"
         print(f"Loading {secondary_model_info.filename}...")
         theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
     else:
         theta_1 = None

     if theta_func1:
-        shared.state.textinfo = f"Loading C"
+        shared.state.textinfo = "Loading C"
         print(f"Loading {tertiary_model_info.filename}...")
         theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu')

@@ -242,9 +241,11 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
     shared.state.textinfo = "Saving"
     print(f"Saving to {output_modelname}...")

-    metadata = {"format": "pt", "sd_merge_models": {}, "sd_merge_recipe": None}
+    metadata = None

     if save_metadata:
+        metadata = {"format": "pt"}
+
         merge_recipe = {
             "type": "webui", # indicate this model was merged with webui's built-in merger
             "primary_model_hash": primary_model_info.sha256,

@@ -262,15 +263,17 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
         }

         metadata["sd_merge_recipe"] = json.dumps(merge_recipe)

+        sd_merge_models = {}
+
         def add_model_metadata(checkpoint_info):
             checkpoint_info.calculate_shorthash()
-            metadata["sd_merge_models"][checkpoint_info.sha256] = {
+            sd_merge_models[checkpoint_info.sha256] = {
                 "name": checkpoint_info.name,
                 "legacy_hash": checkpoint_info.hash,
                 "sd_merge_recipe": checkpoint_info.metadata.get("sd_merge_recipe", None)
             }

-            metadata["sd_merge_models"].update(checkpoint_info.metadata.get("sd_merge_models", {}))
+            sd_merge_models.update(checkpoint_info.metadata.get("sd_merge_models", {}))

         add_model_metadata(primary_model_info)
         if secondary_model_info:

@@ -278,7 +281,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
         if tertiary_model_info:
             add_model_metadata(tertiary_model_info)

-        metadata["sd_merge_models"] = json.dumps(metadata["sd_merge_models"])
+        metadata["sd_merge_models"] = json.dumps(sd_merge_models)

     _, extension = os.path.splitext(output_modelname)
     if extension.lower() == ".safetensors":
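With this change, metadata stays None unless the user asked for it. A hedged sketch of the resulting save path (file name, hash key and model name are placeholders; assumes the safetensors package, whose save_file accepts metadata=None or a dict of strings):

import json
import torch
from safetensors.torch import save_file

save_metadata = True

metadata = None
if save_metadata:
    sd_merge_models = {"<sha256 placeholder>": {"name": "modelA"}}
    metadata = {"format": "pt", "sd_merge_models": json.dumps(sd_merge_models)}

# with metadata=None, no extra header is embedded in the file
save_file({"w": torch.zeros(1)}, "merged.safetensors", metadata=metadata)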
@ -1,15 +1,12 @@
import base64 import base64
import html
import io import io
import math import json
import os import os
import re import re
from pathlib import Path
import gradio as gr import gradio as gr
from modules.paths import data_path from modules.paths import data_path
from modules import shared, ui_tempdir, script_callbacks from modules import shared, ui_tempdir, script_callbacks
import tempfile
from PIL import Image from PIL import Image
re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)' re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'
@ -23,14 +20,14 @@ registered_param_bindings = []
class ParamBinding: class ParamBinding:
def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=[]): def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None):
self.paste_button = paste_button self.paste_button = paste_button
self.tabname = tabname self.tabname = tabname
self.source_text_component = source_text_component self.source_text_component = source_text_component
self.source_image_component = source_image_component self.source_image_component = source_image_component
self.source_tabname = source_tabname self.source_tabname = source_tabname
self.override_settings_component = override_settings_component self.override_settings_component = override_settings_component
self.paste_field_names = paste_field_names self.paste_field_names = paste_field_names or []
def reset(): def reset():
@ -38,20 +35,27 @@ def reset():
def quote(text): def quote(text):
if ',' not in str(text): if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
return text return text
text = str(text) return json.dumps(text, ensure_ascii=False)
text = text.replace('\\', '\\\\')
text = text.replace('"', '\\"')
return f'"{text}"' def unquote(text):
if len(text) == 0 or text[0] != '"' or text[-1] != '"':
return text
try:
return json.loads(text)
except Exception:
return text
def image_from_url_text(filedata): def image_from_url_text(filedata):
if filedata is None: if filedata is None:
return None return None
if type(filedata) == list and len(filedata) > 0 and type(filedata[0]) == dict and filedata[0].get("is_file", False): if type(filedata) == list and filedata and type(filedata[0]) == dict and filedata[0].get("is_file", False):
filedata = filedata[0] filedata = filedata[0]
if type(filedata) == dict and filedata.get("is_file", False): if type(filedata) == dict and filedata.get("is_file", False):
@ -170,31 +174,6 @@ def send_image_and_dimensions(x):
return img, w, h return img, w, h
def find_hypernetwork_key(hypernet_name, hypernet_hash=None):
"""Determines the config parameter name to use for the hypernet based on the parameters in the infotext.
Example: an infotext provides "Hypernet: ke-ta" and "Hypernet hash: 1234abcd". For the "Hypernet" config
parameter this means there should be an entry that looks like "ke-ta-10000(1234abcd)" to set it to.
If the infotext has no hash, then a hypernet with the same name will be selected instead.
"""
hypernet_name = hypernet_name.lower()
if hypernet_hash is not None:
# Try to match the hash in the name
for hypernet_key in shared.hypernetworks.keys():
result = re_hypernet_hash.search(hypernet_key)
if result is not None and result[1] == hypernet_hash:
return hypernet_key
else:
# Fall back to a hypernet with the same name
for hypernet_key in shared.hypernetworks.keys():
if hypernet_key.lower().startswith(hypernet_name):
return hypernet_key
return None
def restore_old_hires_fix_params(res): def restore_old_hires_fix_params(res):
"""for infotexts that specify old First pass size parameter, convert it into """for infotexts that specify old First pass size parameter, convert it into
width, height, and hr scale""" width, height, and hr scale"""
@ -251,28 +230,40 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
lines.append(lastline) lines.append(lastline)
lastline = '' lastline = ''
for i, line in enumerate(lines): for line in lines:
line = line.strip() line = line.strip()
if line.startswith("Negative prompt:"): if line.startswith("Negative prompt:"):
done_with_prompt = True done_with_prompt = True
line = line[16:].strip() line = line[16:].strip()
if done_with_prompt: if done_with_prompt:
negative_prompt += ("" if negative_prompt == "" else "\n") + line negative_prompt += ("" if negative_prompt == "" else "\n") + line
else: else:
prompt += ("" if prompt == "" else "\n") + line prompt += ("" if prompt == "" else "\n") + line
if shared.opts.infotext_styles != "Ignore":
found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
if shared.opts.infotext_styles == "Apply":
res["Styles array"] = found_styles
elif shared.opts.infotext_styles == "Apply if any" and found_styles:
res["Styles array"] = found_styles
res["Prompt"] = prompt res["Prompt"] = prompt
res["Negative prompt"] = negative_prompt res["Negative prompt"] = negative_prompt
for k, v in re_param.findall(lastline): for k, v in re_param.findall(lastline):
v = v[1:-1] if v[0] == '"' and v[-1] == '"' else v try:
if v[0] == '"' and v[-1] == '"':
v = unquote(v)
m = re_imagesize.match(v) m = re_imagesize.match(v)
if m is not None: if m is not None:
res[f"{k}-1"] = m.group(1) res[f"{k}-1"] = m.group(1)
res[f"{k}-2"] = m.group(2) res[f"{k}-2"] = m.group(2)
else: else:
res[k] = v res[k] = v
except Exception:
print(f"Error parsing \"{k}: {v}\"")
# Missing CLIP skip means it was set to 1 (the default) # Missing CLIP skip means it was set to 1 (the default)
if "Clip skip" not in res: if "Clip skip" not in res:
@ -286,24 +277,45 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
res["Hires resize-1"] = 0 res["Hires resize-1"] = 0
res["Hires resize-2"] = 0 res["Hires resize-2"] = 0
if "Hires sampler" not in res:
res["Hires sampler"] = "Use same sampler"
if "Hires prompt" not in res:
res["Hires prompt"] = ""
if "Hires negative prompt" not in res:
res["Hires negative prompt"] = ""
    restore_old_hires_fix_params(res)

    # Missing RNG means the default was set, which is GPU RNG
    if "RNG" not in res:
        res["RNG"] = "GPU"
if "Schedule type" not in res:
res["Schedule type"] = "Automatic"
if "Schedule max sigma" not in res:
res["Schedule max sigma"] = 0
if "Schedule min sigma" not in res:
res["Schedule min sigma"] = 0
if "Schedule rho" not in res:
res["Schedule rho"] = 0
    return res
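For reference, a hedged example of what parse_generation_parameters consumes and returns (illustrative values, not part of the commit):

infotext = '''a cat
Negative prompt: blurry
Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512'''

res = parse_generation_parameters(infotext)
# res["Prompt"] == "a cat", res["Negative prompt"] == "blurry",
# res["Size-1"] == "512", res["Size-2"] == "512", and the defaults above
# (RNG, Schedule type, Hires sampler, ...) are filled in when absent.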
settings_map = {}
infotext_to_setting_name_mapping = [
    ('Clip skip', 'CLIP_stop_at_last_layers', ),
    ('Conditional mask weight', 'inpainting_mask_weight'),
    ('Model hash', 'sd_model_checkpoint'),
    ('ENSD', 'eta_noise_seed_delta'),
('Schedule type', 'k_sched_type'),
('Schedule max sigma', 'sigma_max'),
('Schedule min sigma', 'sigma_min'),
('Schedule rho', 'rho'),
    ('Noise multiplier', 'initial_noise_multiplier'),
    ('Eta', 'eta_ancestral'),
    ('Eta DDIM', 'eta_ddim'),

@@ -312,8 +324,11 @@ infotext_to_setting_name_mapping = [
    ('UniPC skip type', 'uni_pc_skip_type'),
    ('UniPC order', 'uni_pc_order'),
    ('UniPC lower order final', 'uni_pc_lower_order_final'),
('Token merging ratio', 'token_merging_ratio'),
('Token merging ratio hr', 'token_merging_ratio_hr'),
    ('RNG', 'randn_source'),
    ('NGMS', 's_min_uncond'),
('Pad conds', 'pad_cond_uncond'),
]

@@ -405,7 +420,7 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
        vals_pairs = [f"{k}: {v}" for k, v in vals.items()]

-        return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=len(vals_pairs) > 0)
+        return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=bool(vals_pairs))

    paste_fields = paste_fields + [(override_settings_component, paste_settings)]

@@ -422,5 +437,3 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
        outputs=[],
        show_progress=False,
    )
modules/gfpgan_model.py

@@ -1,12 +1,10 @@
import os
-import sys
-import traceback

import facexlib
import gfpgan

import modules.face_restoration
-from modules import paths, shared, devices, modelloader
+from modules import paths, shared, devices, modelloader, errors

model_dir = "GFPGAN"
user_path = None
@@ -27,7 +25,7 @@ def gfpgann():
        return None

    models = modelloader.load_models(model_path, model_url, user_path, ext_filter="GFPGAN")
-    if len(models) == 1 and "http" in models[0]:
+    if len(models) == 1 and models[0].startswith("http"):
        model_file = models[0]
    elif len(models) != 0:
        latest_file = max(models, key=os.path.getctime)
@@ -72,13 +70,10 @@ gfpgan_constructor = None

def setup_model(dirname):
-    global model_path
-    if not os.path.exists(model_path):
-        os.makedirs(model_path)
    try:
+        os.makedirs(model_path, exist_ok=True)
        from gfpgan import GFPGANer
-        from facexlib import detection, parsing
+        from facexlib import detection, parsing  # noqa: F401
        global user_path
        global have_gfpgan
        global gfpgan_constructor

@@ -112,5 +107,4 @@ def setup_model(dirname):
        shared.face_restorers.append(FaceRestorerGFPGAN())
    except Exception:
-        print("Error setting up GFPGAN:", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
+        errors.report("Error setting up GFPGAN", exc_info=True)
modules/gitpython_hack.py Normal file

@@ -0,0 +1,42 @@
from __future__ import annotations
import io
import subprocess
import git
class Git(git.Git):
"""
Git subclassed to never use persistent processes.
"""
def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs):
raise NotImplementedError(f"Refusing to use persistent process: {attr_name} ({cmd_name} {args} {kwargs})")
def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]:
ret = subprocess.check_output(
[self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch-check"],
input=self._prepare_ref(ref),
cwd=self._working_dir,
timeout=2,
)
return self._parse_object_header(ret)
def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]:
# Not really streaming, per se; this buffers the entire object in memory.
# Shouldn't be a problem for our use case, since we're only using this for
# object headers (commit objects).
ret = subprocess.check_output(
[self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch"],
input=self._prepare_ref(ref),
cwd=self._working_dir,
timeout=30,
)
bio = io.BytesIO(ret)
hexsha, typename, size = self._parse_object_header(bio.readline())
return (hexsha, typename, size, self.CatFileContentStream(size, bio))
class Repo(git.Repo):
GitCommandWrapperType = Git
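A brief usage sketch (illustrative, not part of the file; assumes the working directory is a git checkout):

from modules.gitpython_hack import Repo

repo = Repo(".")
# header lookups go through one-shot `git cat-file --batch-check` invocations
# rather than a long-lived persistent process:
hexsha, typename, size = repo.git.get_object_header("HEAD")
print(typename, size)  # -> "commit" and the object's size in bytes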
modules/hashes.py

@@ -46,8 +46,8 @@ def calculate_sha256(filename):
    return hash_sha256.hexdigest()

-def sha256_from_cache(filename, title):
+def sha256_from_cache(filename, title, use_addnet_hash=False):
-    hashes = cache("hashes")
+    hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes")
    ondisk_mtime = os.path.getmtime(filename)

    if title not in hashes:

@@ -62,10 +62,10 @@ def sha256_from_cache(filename, title):
    return cached_sha256

-def sha256(filename, title):
+def sha256(filename, title, use_addnet_hash=False):
-    hashes = cache("hashes")
+    hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes")
-    sha256_value = sha256_from_cache(filename, title)
+    sha256_value = sha256_from_cache(filename, title, use_addnet_hash)
    if sha256_value is not None:
        return sha256_value
@@ -73,6 +73,10 @@ def sha256(filename, title):
        return None

    print(f"Calculating sha256 for {filename}: ", end='')
if use_addnet_hash:
with open(filename, "rb") as file:
sha256_value = addnet_hash_safetensors(file)
else:
        sha256_value = calculate_sha256(filename)
    print(f"{sha256_value}")

@@ -86,6 +90,19 @@ def sha256(filename, title):
    return sha256_value
def addnet_hash_safetensors(b):
"""kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py"""
hash_sha256 = hashlib.sha256()
blksize = 1024 * 1024
b.seek(0)
header = b.read(8)
n = int.from_bytes(header, "little")
offset = n + 8
b.seek(offset)
for chunk in iter(lambda: b.read(blksize), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
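The point of this variant is that it hashes only the tensor payload: the 8-byte length prefix and the JSON header of a .safetensors file are skipped, so edits to embedded metadata leave the hash unchanged. A minimal in-memory self-check (illustrative, not part of the commit):

import io
import json
import struct

header = json.dumps({"__metadata__": {"note": "x"}}).encode()
blob = io.BytesIO(struct.pack("<Q", len(header)) + header + b"tensor-bytes")
print(addnet_hash_safetensors(blob))  # sha256 of b"tensor-bytes" only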
modules/hypernetworks/hypernetwork.py

@@ -1,10 +1,7 @@
-import csv
import datetime
import glob
import html
import os
-import sys
-import traceback

import inspect
import modules.textual_inversion.dataset
@@ -12,13 +9,13 @@ import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
-from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint
+from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors
from modules.textual_inversion import textual_inversion, logging
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
-from collections import defaultdict, deque
+from collections import deque
from statistics import stdev, mean
@@ -178,34 +175,34 @@ class Hypernetwork:
    def weights(self):
        res = []

-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                res += layer.parameters()
        return res

    def train(self, mode=True):
-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                layer.train(mode=mode)
                for param in layer.parameters():
                    param.requires_grad = mode

    def to(self, device):
-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                layer.to(device)
        return self

    def set_multiplier(self, multiplier):
-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                layer.multiplier = multiplier
        return self

    def eval(self):
-        for k, layers in self.layers.items():
+        for layers in self.layers.values():
            for layer in layers:
                layer.eval()
                for param in layer.parameters():
@@ -326,16 +323,13 @@ def load_hypernetwork(name):
    if path is None:
        return None

-    hypernetwork = Hypernetwork()
    try:
+        hypernetwork = Hypernetwork()
        hypernetwork.load(path)
-    except Exception:
-        print(f"Error loading hypernetwork {path}", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
-        return None
-    return hypernetwork
+        return hypernetwork
+    except Exception:
+        errors.report(f"Error loading hypernetwork {path}", exc_info=True)
+        return None

def load_hypernetworks(names, multipliers=None):

@@ -359,17 +353,6 @@ def load_hypernetworks(names, multipliers=None):
    shared.loaded_hypernetworks.append(hypernetwork)
-def find_closest_hypernetwork_name(search: str):
-    if not search:
-        return None
-    search = search.lower()
-    applicable = [name for name in shared.hypernetworks if search in name.lower()]
-    if not applicable:
-        return None
-    applicable = sorted(applicable, key=lambda name: len(name))
-    return applicable[0]

def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None):
    hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None)
@@ -404,7 +387,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None):
    k = self.to_k(context_k)
    v = self.to_v(context_v)

-    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+    q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))

    sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

@@ -452,18 +435,6 @@ def statistics(data):
    return total_information, recent_information
-def report_statistics(loss_info:dict):
-    keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
-    for key in keys:
-        try:
-            print("Loss statistics for file " + key)
-            info, recent = statistics(list(loss_info[key]))
-            print(info)
-            print(recent)
-        except Exception as e:
-            print(e)

def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
    # Remove illegal characters from name.
    name = "".join( x for x in name if (x.isalnum() or x in "._- "))
@@ -620,7 +591,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
    try:
        sd_hijack_checkpoint.add()

-        for i in range((steps-initial_step) * gradient_step):
+        for _ in range((steps-initial_step) * gradient_step):
            if scheduler.finished:
                break
            if shared.state.interrupted:

@@ -771,12 +742,11 @@ Last saved image: {html.escape(last_saved_image)}<br/>
</p>
"""
    except Exception:
-        print(traceback.format_exc(), file=sys.stderr)
+        errors.report("Exception in training hypernetwork", exc_info=True)
    finally:
        pbar.leave = False
        pbar.close()
        hypernetwork.eval()
-        #report_statistics(loss_dict)
        sd_hijack_checkpoint.remove()
modules/hypernetworks/ui.py

@@ -1,19 +1,17 @@
import html
-import os
-import re

import gradio as gr
import modules.hypernetworks.hypernetwork
from modules import devices, sd_hijack, shared

not_available = ["hardswish", "multiheadattention"]
-keys = list(x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
+keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available]

def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
    filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)

-    return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {filename}", ""
+    return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", ""

def train_hypernetwork(*args):
modules/images.py

@@ -1,6 +1,6 @@
from __future__ import annotations
import datetime
-import sys
-import traceback

import pytz
import io
@@ -12,18 +12,27 @@ import re
import numpy as np
import piexif
import piexif.helper
-from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
+from PIL import Image, ImageFont, ImageDraw, ImageColor, PngImagePlugin
-from fonts.ttf import Roboto
import string
import json
import hashlib

from modules import sd_samplers, shared, script_callbacks, errors
-from modules.shared import opts, cmd_opts
+from modules.paths_internal import roboto_ttf_file
+from modules.shared import opts

import modules.sd_vae as sd_vae

LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
def get_font(fontsize: int):
try:
return ImageFont.truetype(opts.font or roboto_ttf_file, fontsize)
except Exception:
return ImageFont.truetype(roboto_ttf_file, fontsize)
def image_grid(imgs, batch_size=1, rows=None):
    if rows is None:
        if opts.n_rows > 0:

@@ -132,6 +141,11 @@ class GridAnnotation:

def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
color_active = ImageColor.getcolor(opts.grid_text_active_color, 'RGB')
color_inactive = ImageColor.getcolor(opts.grid_text_inactive_color, 'RGB')
color_background = ImageColor.getcolor(opts.grid_background_color, 'RGB')
    def wrap(drawing, text, font, line_length):
        lines = ['']
        for word in text.split():

@@ -142,14 +156,8 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
                lines.append(word)
        return lines
-    def get_font(fontsize):
-        try:
-            return ImageFont.truetype(opts.font or Roboto, fontsize)
-        except Exception:
-            return ImageFont.truetype(Roboto, fontsize)

    def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize):
-        for i, line in enumerate(lines):
+        for line in lines:
            fnt = initial_fnt
            fontsize = initial_fontsize
            while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0:
@@ -167,9 +175,6 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
                fnt = get_font(fontsize)

-    color_active = (0, 0, 0)
-    color_inactive = (153, 153, 153)

    pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4

    cols = im.width // width

@@ -178,7 +183,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
    assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
    assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'

-    calc_img = Image.new("RGB", (1, 1), "white")
+    calc_img = Image.new("RGB", (1, 1), color_background)
    calc_d = ImageDraw.Draw(calc_img)

    for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):

@@ -199,7 +204,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
    pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2

-    result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), "white")
+    result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), color_background)

    for row in range(rows):
        for col in range(cols):
@@ -335,8 +340,20 @@ def sanitize_filename_part(text, replace_spaces=True):

class FilenameGenerator:
def get_vae_filename(self): #get the name of the VAE file.
if sd_vae.loaded_vae_file is None:
return "NoneType"
file_name = os.path.basename(sd_vae.loaded_vae_file)
split_file_name = file_name.split('.')
if len(split_file_name) > 1 and split_file_name[0] == '':
return split_file_name[1] # if the first character of the filename is "." then [1] is obtained.
else:
return split_file_name[0]
    replacements = {
        'seed': lambda self: self.seed if self.seed is not None else '',
'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],
'seed_last': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.all_seeds[-1],
        'steps': lambda self: self.p and self.p.steps,
        'cfg': lambda self: self.p and self.p.cfg_scale,
        'width': lambda self: self.image.width,

@@ -353,19 +370,23 @@ class FilenameGenerator:
        'prompt_no_styles': lambda self: self.prompt_no_style(),
        'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
        'prompt_words': lambda self: self.prompt_words(),
-        'batch_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.batch_index + 1,
+        'batch_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 or self.zip else self.p.batch_index + 1,
-        'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.n_iter == 1 and self.p.batch_size == 1 else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
+        'batch_size': lambda self: self.p.batch_size,
+        'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if (self.p.n_iter == 1 and self.p.batch_size == 1) or self.zip else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
        'hasprompt': lambda self, *args: self.hasprompt(*args),  # accepts formats:[hasprompt<prompt1|default><prompt2>..]
        'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"],
        'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
'user': lambda self: self.p.user,
'vae_filename': lambda self: self.get_vae_filename(),
    }
    default_time_format = '%Y%m%d%H%M%S'

-    def __init__(self, p, seed, prompt, image):
+    def __init__(self, p, seed, prompt, image, zip=False):
        self.p = p
        self.seed = seed
        self.prompt = prompt
        self.image = image
self.zip = zip
    def hasprompt(self, *args):
        lower = self.prompt.lower()

@@ -389,7 +410,7 @@ class FilenameGenerator:
        prompt_no_style = self.prompt
        for style in shared.prompt_styles.get_style_prompts(self.p.styles):
-            if len(style) > 0:
+            if style:
                for part in style.split("{prompt}"):
                    prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
@@ -398,7 +419,7 @@ class FilenameGenerator:
        return sanitize_filename_part(prompt_no_style, replace_spaces=False)

    def prompt_words(self):
-        words = [x for x in re_nonletters.split(self.prompt or "") if len(x) > 0]
+        words = [x for x in re_nonletters.split(self.prompt or "") if x]
        if len(words) == 0:
            words = ["empty"]
        return sanitize_filename_part(" ".join(words[0:opts.directories_max_prompt_words]), replace_spaces=False)
@@ -406,16 +427,16 @@ class FilenameGenerator:
    def datetime(self, *args):
        time_datetime = datetime.datetime.now()

-        time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
+        time_format = args[0] if (args and args[0] != "") else self.default_time_format
        try:
            time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
-        except pytz.exceptions.UnknownTimeZoneError as _:
+        except pytz.exceptions.UnknownTimeZoneError:
            time_zone = None

        time_zone_time = time_datetime.astimezone(time_zone)
        try:
            formatted_time = time_zone_time.strftime(time_format)
-        except (ValueError, TypeError) as _:
+        except (ValueError, TypeError):
            formatted_time = time_zone_time.strftime(self.default_time_format)

        return sanitize_filename_part(formatted_time, replace_spaces=False)
@@ -445,8 +466,7 @@ class FilenameGenerator:
                    replacement = fun(self, *pattern_args)
                except Exception:
                    replacement = None
-                    print(f"Error adding [{pattern}] to filename", file=sys.stderr)
-                    print(traceback.format_exc(), file=sys.stderr)
+                    errors.report(f"Error adding [{pattern}] to filename", exc_info=True)

                if replacement == NOTHING_AND_SKIP_PREVIOUS_TEXT:
                    continue
@@ -472,15 +492,61 @@ def get_next_sequence_number(path, basename):
    prefix_length = len(basename)
    for p in os.listdir(path):
        if p.startswith(basename):
-            l = os.path.splitext(p[prefix_length:])[0].split('-')  # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
+            parts = os.path.splitext(p[prefix_length:])[0].split('-')  # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
            try:
-                result = max(int(l[0]), result)
+                result = max(int(parts[0]), result)
            except ValueError:
                pass

    return result + 1
def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_pnginfo=None, pnginfo_section_name='parameters'):
"""
Saves image to filename, including geninfo as text information for generation info.
For PNG images, geninfo is added to existing pnginfo dictionary using the pnginfo_section_name argument as key.
For JPG images, there's no dictionary and geninfo just replaces the EXIF description.
"""
if extension is None:
extension = os.path.splitext(filename)[1]
image_format = Image.registered_extensions()[extension]
if extension.lower() == '.png':
existing_pnginfo = existing_pnginfo or {}
if opts.enable_pnginfo:
existing_pnginfo[pnginfo_section_name] = geninfo
if opts.enable_pnginfo:
pnginfo_data = PngImagePlugin.PngInfo()
for k, v in (existing_pnginfo or {}).items():
pnginfo_data.add_text(k, str(v))
else:
pnginfo_data = None
image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
elif extension.lower() in (".jpg", ".jpeg", ".webp"):
if image.mode == 'RGBA':
image = image.convert("RGB")
elif image.mode == 'I;16':
image = image.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L")
image.save(filename, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless)
if opts.enable_pnginfo and geninfo is not None:
exif_bytes = piexif.dump({
"Exif": {
piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or "", encoding="unicode")
},
})
piexif.insert(exif_bytes, filename)
else:
image.save(filename, format=image_format, quality=opts.jpeg_quality)
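A small round-trip sketch (illustrative, not part of the commit; assumes opts.enable_pnginfo is on, and pairs with read_info_from_image further down in this file):

from PIL import Image

img = Image.new("RGB", (64, 64))
save_image_with_geninfo(img, "a cat\nSteps: 20, Sampler: Euler a", "/tmp/example.png")
geninfo, items = read_info_from_image(Image.open("/tmp/example.png"))
print(geninfo)  # -> "a cat\nSteps: 20, Sampler: Euler a"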
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
    """Save an image.

@@ -565,38 +631,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
    info = params.pnginfo.get(pnginfo_section_name, None)

    def _atomically_save_image(image_to_save, filename_without_extension, extension):
-        # save image with .tmp extension to avoid race condition when another process detects new image in the directory
+        """
+        save image with .tmp extension to avoid race condition when another process detects new image in the directory
+        """
        temp_file_path = f"{filename_without_extension}.tmp"
-        image_format = Image.registered_extensions()[extension]
-        if extension.lower() == '.png':
-            pnginfo_data = PngImagePlugin.PngInfo()
-            if opts.enable_pnginfo:
-                for k, v in params.pnginfo.items():
-                    pnginfo_data.add_text(k, str(v))
-            image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
-        elif extension.lower() in (".jpg", ".jpeg", ".webp"):
-            if image_to_save.mode == 'RGBA':
-                image_to_save = image_to_save.convert("RGB")
-            elif image_to_save.mode == 'I;16':
-                image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L")
-            image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless)
-            if opts.enable_pnginfo and info is not None:
-                exif_bytes = piexif.dump({
-                    "Exif": {
-                        piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
-                    },
-                })
-                piexif.insert(exif_bytes, temp_file_path)
-        else:
-            image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality)
+        save_image_with_geninfo(image_to_save, info, temp_file_path, extension, existing_pnginfo=params.pnginfo, pnginfo_section_name=pnginfo_section_name)

-        # atomically rename the file with correct extension
        os.replace(temp_file_path, filename_without_extension + extension)

    fullfn_without_extension, extension = os.path.splitext(params.filename)
@@ -612,12 +653,18 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
    oversize = image.width > opts.target_side_length or image.height > opts.target_side_length
    if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024):
        ratio = image.width / image.height
        resize_to = None
        if oversize and ratio > 1:
-            image = image.resize((round(opts.target_side_length), round(image.height * opts.target_side_length / image.width)), LANCZOS)
+            resize_to = round(opts.target_side_length), round(image.height * opts.target_side_length / image.width)
        elif oversize:
-            image = image.resize((round(image.width * opts.target_side_length / image.height), round(opts.target_side_length)), LANCZOS)
+            resize_to = round(image.width * opts.target_side_length / image.height), round(opts.target_side_length)
if resize_to is not None:
try:
# Resizing image with LANCZOS could throw an exception if e.g. image mode is I;16
image = image.resize(resize_to, LANCZOS)
except Exception:
image = image.resize(resize_to)
        try:
            _atomically_save_image(image, fullfn_without_extension, ".jpg")
        except Exception as e:

@@ -635,8 +682,15 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
    return fullfn, txt_fullfn
-def read_info_from_image(image):
-    items = image.info or {}

IGNORED_INFO_KEYS = {
    'jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
'loop', 'background', 'timestamp', 'duration', 'progressive', 'progression',
'icc_profile', 'chromaticity', 'photoshop',
}
def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]:
items = (image.info or {}).copy()
    geninfo = items.pop('parameters', None)

@@ -652,8 +706,7 @@ def read_info_from_image(image):
            items['exif comment'] = exif_comment
            geninfo = exif_comment

-    for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
-                  'loop', 'background', 'timestamp', 'duration']:
+    for field in IGNORED_INFO_KEYS:
        items.pop(field, None)

    if items.get("Software", None) == "NovelAI":
@@ -665,8 +718,7 @@ def read_info_from_image(image):
Negative prompt: {json_info["uc"]}
Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
        except Exception:
-            print("Error parsing NovelAI image generation parameters:", file=sys.stderr)
-            print(traceback.format_exc(), file=sys.stderr)
+            errors.report("Error parsing NovelAI image generation parameters", exc_info=True)

    return geninfo, items
modules/img2img.py

@@ -1,23 +1,21 @@
-import math
import os
-import sys
-import traceback
+from pathlib import Path

import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError
+import gradio as gr

-from modules import devices, sd_samplers
+from modules import sd_samplers, images as imgutil
-from modules.generation_parameters_copypaste import create_override_settings_dict
+from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
import modules.shared as shared
import modules.processing as processing
from modules.ui import plaintext_to_html
-import modules.images as images
import modules.scripts

-def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
+def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
    processing.fix_seed(p)

    images = []

@@ -31,7 +29,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
    is_inpaint_batch = False
    if inpaint_mask_dir:
        inpaint_masks = shared.listfiles(inpaint_mask_dir)
-        is_inpaint_batch = len(inpaint_masks) > 0
+        is_inpaint_batch = bool(inpaint_masks)

        if is_inpaint_batch:
            print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")

@@ -44,6 +43,14 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
    state.job_count = len(images) * p.n_iter
# extract "default" params to use in case getting png info fails
prompt = p.prompt
negative_prompt = p.negative_prompt
seed = p.seed
cfg_scale = p.cfg_scale
sampler_name = p.sampler_name
steps = p.steps
    for i, image in enumerate(images):
        state.job = f"{i+1} out of {len(images)}"
        if state.skipped:

@@ -59,23 +66,59 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
            continue

        # Use the EXIF orientation of photos taken by smartphones.
        img = ImageOps.exif_transpose(img)
if to_scale:
p.width = int(img.width * scale_by)
p.height = int(img.height * scale_by)
        p.init_images = [img] * p.batch_size
image_path = Path(image)
        if is_inpaint_batch:
-            # try to find corresponding mask for an image using simple filename matching
-            mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
-            # if not found use first one ("same mask for all images" use-case)
-            if not mask_image_path in inpaint_masks:
+            if len(inpaint_masks) == 1:
                mask_image_path = inpaint_masks[0]
+            else:
+                # try to find corresponding mask for an image using simple filename matching
+                mask_image_dir = Path(inpaint_mask_dir)
+                masks_found = list(mask_image_dir.glob(f"{image_path.stem}.*"))
+                if len(masks_found) == 0:
+                    print(f"Warning: mask is not found for {image_path} in {mask_image_dir}. Skipping it.")
+                    continue
+                # it should contain only 1 matching mask
+                # otherwise user has many masks with the same name but different extensions
+                mask_image_path = masks_found[0]

            mask_image = Image.open(mask_image_path)
            p.image_mask = mask_image
if use_png_info:
try:
info_img = img
if png_info_dir:
info_img_path = os.path.join(png_info_dir, os.path.basename(image))
info_img = Image.open(info_img_path)
geninfo, _ = imgutil.read_info_from_image(info_img)
parsed_parameters = parse_generation_parameters(geninfo)
parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})}
except Exception:
parsed_parameters = {}
p.prompt = prompt + (" " + parsed_parameters["Prompt"] if "Prompt" in parsed_parameters else "")
p.negative_prompt = negative_prompt + (" " + parsed_parameters["Negative prompt"] if "Negative prompt" in parsed_parameters else "")
p.seed = int(parsed_parameters.get("Seed", seed))
p.cfg_scale = float(parsed_parameters.get("CFG scale", cfg_scale))
p.sampler_name = parsed_parameters.get("Sampler", sampler_name)
p.steps = int(parsed_parameters.get("Steps", steps))
        proc = modules.scripts.scripts_img2img.run(p, *args)
        if proc is None:
            proc = process_images(p)

        for n, processed_image in enumerate(proc.images):
-            filename = os.path.basename(image)
+            filename = image_path.name
            relpath = os.path.dirname(os.path.relpath(image, input_dir))

            if n > 0:

@@ -89,7 +132,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
            processed_image.save(os.path.join(output_dir, relpath, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
    override_settings = create_override_settings_dict(override_settings_texts)

    is_batch = mode == 5

@@ -103,7 +146,8 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
    elif mode == 2:  # inpaint
        image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
        alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
-        mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
+        mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1')
+        mask = ImageChops.lighter(alpha_mask, mask).convert('L')
        image = image.convert("RGB")
    elif mode == 3:  # inpaint sketch
        image = inpaint_color_sketch
@@ -125,7 +169,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
    if image is not None:
        image = ImageOps.exif_transpose(image)

-    if selected_scale_tab == 1:
+    if selected_scale_tab == 1 and not is_batch:
        assert image, "Can't scale by because no image is selected"

        width = int(image.width * scale_by)
@@ -171,6 +215,8 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
    p.scripts = modules.scripts.scripts_img2img
    p.script_args = args
p.user = request.username
    if shared.cmd_opts.enable_console_prompts:
        print(f"\nimg2img: {prompt}", file=shared.progress_print_out)

@@ -180,7 +226,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
    if is_batch:
        assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"

-        process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)
+        process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)

        processed = Processed(p, [], p.seed, "")
    else:
modules/interrogate.py

@@ -1,6 +1,5 @@
import os
import sys
-import traceback
from collections import namedtuple
from pathlib import Path
import re
@@ -11,7 +10,6 @@ import torch.hub
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

-import modules.shared as shared
from modules import devices, paths, shared, lowvram, modelloader, errors

blip_image_eval_size = 384
@@ -160,7 +158,7 @@ class InterrogateModels:
        text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]

        top_count = min(top_count, len(text_array))
-        text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate)
+        text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate)
        text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
        text_features /= text_features.norm(dim=-1, keepdim=True)
@@ -186,8 +184,7 @@ class InterrogateModels:
    def interrogate(self, pil_image):
        res = ""
-        shared.state.begin()
-        shared.state.job = 'interrogate'
+        shared.state.begin(job="interrogate")
        try:
            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
                lowvram.send_everything_to_cpu()
@@ -208,8 +205,8 @@ class InterrogateModels:
            image_features /= image_features.norm(dim=-1, keepdim=True)

-            for name, topn, items in self.categories():
-                matches = self.rank(image_features, items, top_count=topn)
+            for cat in self.categories():
+                matches = self.rank(image_features, cat.items, top_count=cat.topn)
                for match, score in matches:
                    if shared.opts.interrogate_return_ranks:
                        res += f", ({match}:{score/100:.3f})"
@@ -217,8 +214,7 @@ class InterrogateModels:
                        res += f", {match}"
        except Exception:
-            print("Error interrogating", file=sys.stderr)
-            print(traceback.format_exc(), file=sys.stderr)
+            errors.report("Error interrogating", exc_info=True)
            res += "<error>"

        self.unload()
modules/launch_utils.py Normal file

@@ -0,0 +1,344 @@
# this script installs necessary requirements and launches main program in webui.py
import subprocess
import os
import sys
import importlib.util
import platform
import json
from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
args, _ = cmd_args.parser.parse_known_args()
python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
dir_repos = "repositories"
# Whether to default to printing command output
default_command_live = (os.environ.get('WEBUI_LAUNCH_LIVE_OUTPUT') == "1")
if 'GRADIO_ANALYTICS_ENABLED' not in os.environ:
os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
def check_python_version():
is_windows = platform.system() == "Windows"
major = sys.version_info.major
minor = sys.version_info.minor
micro = sys.version_info.micro
if is_windows:
supported_minors = [10]
else:
supported_minors = [7, 8, 9, 10, 11]
if not (major == 3 and minor in supported_minors):
import modules.errors
modules.errors.print_error_explanation(f"""
INCOMPATIBLE PYTHON VERSION
This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}.
If you encounter an error with "RuntimeError: Couldn't install torch." message,
or any other error regarding unsuccessful package (library) installation,
please downgrade (or upgrade) to the latest version of 3.10 Python
and delete current Python and "venv" folder in WebUI's directory.
You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/
{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
Use --skip-python-version-check to suppress this warning.
""")
@lru_cache()
def commit_hash():
try:
return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip()
except Exception:
return "<none>"
@lru_cache()
def git_tag():
try:
return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip()
except Exception:
try:
from pathlib import Path
changelog_md = Path(__file__).parent.parent / "CHANGELOG.md"
with changelog_md.open(encoding="utf-8") as file:
return next((line.strip() for line in file if line.strip()), "<none>")
except Exception:
return "<none>"
def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live) -> str:
if desc is not None:
print(desc)
run_kwargs = {
"args": command,
"shell": True,
"env": os.environ if custom_env is None else custom_env,
"encoding": 'utf8',
"errors": 'ignore',
}
if not live:
run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE
result = subprocess.run(**run_kwargs)
if result.returncode != 0:
error_bits = [
f"{errdesc or 'Error running command'}.",
f"Command: {command}",
f"Error code: {result.returncode}",
]
if result.stdout:
error_bits.append(f"stdout: {result.stdout}")
if result.stderr:
error_bits.append(f"stderr: {result.stderr}")
raise RuntimeError("\n".join(error_bits))
return (result.stdout or "")
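An illustrative call (not in the file; assumes WEBUI_LAUNCH_LIVE_OUTPUT is unset, so output is captured rather than streamed):

output = run("git --version", desc="Checking git", errdesc="Couldn't run git")
print(output)  # e.g. "git version 2.39.2"; a non-zero exit raises RuntimeError with stdout/stderr attached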
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
except ModuleNotFoundError:
return False
return spec is not None
def repo_dir(name):
return os.path.join(script_path, dir_repos, name)
def run_pip(command, desc=None, live=default_command_live):
if args.skip_install:
return
index_url_line = f' --index-url {index_url}' if index_url != '' else ''
return run(f'"{python}" -m pip {command} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}", live=live)
def check_run_python(code: str) -> bool:
result = subprocess.run([python, "-c", code], capture_output=True, shell=False)
return result.returncode == 0
def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
if os.path.exists(dir):
if commithash is None:
return
current_hash = run(f'"{git}" -C "{dir}" rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}", live=False).strip()
if current_hash == commithash:
return
run(f'"{git}" -C "{dir}" fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
run(f'"{git}" -C "{dir}" checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True)
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True)
if commithash is not None:
run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
def git_pull_recursive(dir):
for subdir, _, _ in os.walk(dir):
if os.path.exists(os.path.join(subdir, '.git')):
try:
output = subprocess.check_output([git, '-C', subdir, 'pull', '--autostash'])
print(f"Pulled changes for repository in '{subdir}':\n{output.decode('utf-8').strip()}\n")
except subprocess.CalledProcessError as e:
print(f"Couldn't perform 'git pull' on repository in '{subdir}':\n{e.output.decode('utf-8').strip()}\n")
def version_check(commit):
try:
import requests
commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
if commit != "<none>" and commits['commit']['sha'] != commit:
print("--------------------------------------------------------")
print("| You are not up to date with the most recent release. |")
print("| Consider running `git pull` to update. |")
print("--------------------------------------------------------")
elif commits['commit']['sha'] == commit:
print("You are up to date with the most recent release.")
else:
print("Not a git clone, can't perform version check.")
except Exception as e:
print("version check failed", e)
def run_extension_installer(extension_dir):
path_installer = os.path.join(extension_dir, "install.py")
if not os.path.isfile(path_installer):
return
try:
env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
except Exception as e:
errors.report(str(e))
def list_extensions(settings_file):
settings = {}
try:
if os.path.isfile(settings_file):
with open(settings_file, "r", encoding="utf8") as file:
settings = json.load(file)
except Exception:
errors.report("Could not load settings", exc_info=True)
disabled_extensions = set(settings.get('disabled_extensions', []))
disable_all_extensions = settings.get('disable_all_extensions', 'none')
if disable_all_extensions != 'none':
return []
return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]
def run_extensions_installers(settings_file):
if not os.path.isdir(extensions_dir):
return
for dirname_extension in list_extensions(settings_file):
run_extension_installer(os.path.join(extensions_dir, dirname_extension))
def prepare_environment():
torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip")
clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "c9fe758757e022f05ca5a53fa8fac28889e4f1cf")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
try:
        # the existence of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
        os.remove(os.path.join(script_path, "tmp", "restart"))
        os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
except OSError:
pass
if not args.skip_python_version_check:
check_python_version()
commit = commit_hash()
tag = git_tag()
print(f"Python {sys.version}")
print(f"Version: {tag}")
print(f"Commit hash: {commit}")
if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"):
raise RuntimeError(
'Torch is not able to use GPU; '
'add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'
)
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
if not is_installed("open_clip"):
run_pip(f"install {openclip_package}", "open_clip")
if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers:
if platform.system() == "Windows":
if platform.python_version().startswith("3.10"):
run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
else:
print("Installation of xformers is not supported in this version of Python.")
print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
if not is_installed("xformers"):
exit(0)
elif platform.system() == "Linux":
run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
if not is_installed("ngrok") and args.ngrok:
run_pip("install ngrok", "ngrok")
os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)
git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
if not is_installed("lpips"):
run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer")
if not os.path.isfile(requirements_file):
requirements_file = os.path.join(script_path, requirements_file)
run_pip(f"install -r \"{requirements_file}\"", "requirements")
run_extensions_installers(settings_file=args.ui_settings_file)
if args.update_check:
version_check(commit)
if args.update_all_extensions:
git_pull_recursive(extensions_dir)
if "--exit" in sys.argv:
print("Exiting because of --exit argument")
exit(0)
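
# The defaults above can be overridden from the environment before launching; a
# sketch (values are illustrative, taken from the defaults in this function):
#
#     import os
#     os.environ["TORCH_INDEX_URL"] = "https://download.pytorch.org/whl/cu118"
#     os.environ["XFORMERS_PACKAGE"] = "xformers==0.0.20"
#     os.environ["REQS_FILE"] = "requirements_versions.txt"
#     prepare_environment()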


def configure_for_tests():
    if "--api" not in sys.argv:
        sys.argv.append("--api")
    if "--ckpt" not in sys.argv:
        sys.argv.append("--ckpt")
        sys.argv.append(os.path.join(script_path, "test/test_files/empty.pt"))
    if "--skip-torch-cuda-test" not in sys.argv:
        sys.argv.append("--skip-torch-cuda-test")
    if "--disable-nan-check" not in sys.argv:
        sys.argv.append("--disable-nan-check")

    os.environ['COMMANDLINE_ARGS'] = ""


def start():
    print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
    import webui
    if '--nowebui' in sys.argv:
        webui.api_only()
    else:
        webui.webui()
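
# How these helpers fit together, roughly (a sketch of the launch flow; the actual
# launch.py entry point may differ in details such as flag handling):
#
#     if __name__ == "__main__":
#         prepare_environment()
#         if "--test-server" in sys.argv:
#             configure_for_tests()
#         start()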

View File

@@ -1,8 +1,7 @@
 import json
 import os
-import sys
-import traceback
+from modules import errors

 localizations = {}
@@ -31,7 +30,6 @@ def localization_js(current_localization_name: str) -> str:
         with open(fn, "r", encoding="utf8") as file:
             data = json.load(file)
     except Exception:
-        print(f"Error loading localization from {fn}:", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
+        errors.report(f"Error loading localization from {fn}", exc_info=True)

     return f"window.localization = {json.dumps(data)}"

View File

@@ -15,6 +15,8 @@ def send_everything_to_cpu():

 def setup_for_low_vram(sd_model, use_medvram):
+    sd_model.lowvram = True
+
     parents = {}

     def send_me_to_gpu(module, _):
@@ -96,3 +98,7 @@ def setup_for_low_vram(sd_model, use_medvram):
     diff_model.middle_block.register_forward_pre_hook(send_me_to_gpu)
     for block in diff_model.output_blocks:
         block.register_forward_pre_hook(send_me_to_gpu)
+
+
+def is_enabled(sd_model):
+    return getattr(sd_model, 'lowvram', False)

View File

@@ -1,13 +1,15 @@
 import torch
 import platform
+from modules import paths
 from modules.sd_hijack_utils import CondFunc
 from packaging import version

-# has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
-# check `getattr` and try it for compatibility
+# before torch version 1.13, has_mps is only available in nightly pytorch and macOS 12.3+,
+# so use the `getattr` check and try it for compatibility.
+# in torch version 1.13, backends.mps.is_available() and backends.mps.is_built() were introduced to check mps availability,
+# and since the torch 2.0.1+ nightly build, getattr(torch, 'has_mps', False) is deprecated, see https://github.com/pytorch/pytorch/pull/103279
 def check_for_mps() -> bool:
-    if not getattr(torch, 'has_mps', False):
-        return False
-    try:
+    if version.parse(torch.__version__) <= version.parse("2.0.1"):
+        if not getattr(torch, 'has_mps', False):
+            return False
+        try:
@@ -15,6 +17,8 @@ def check_for_mps() -> bool:
-        return True
-    except Exception:
-        return False
+            return True
+        except Exception:
+            return False
+    else:
+        return torch.backends.mps.is_available() and torch.backends.mps.is_built()


 has_mps = check_for_mps()
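
A quick usage sketch of the flag computed above (assuming this file is modules/mac_specific.py; any tensor code works the same way):

    import torch
    from modules import mac_specific

    device = torch.device("mps") if mac_specific.has_mps else torch.device("cpu")
    x = torch.ones(3, device=device)  # lands on Apple's Metal backend when available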

View File

@@ -1,4 +1,5 @@
-import glob
+from __future__ import annotations
 import os
 import shutil
 import importlib
@@ -9,6 +10,29 @@ from modules.upscaler import Upscaler, UpscalerLanczos, UpscalerNearest, UpscalerNone
 from modules.paths import script_path, models_path


+def load_file_from_url(
+    url: str,
+    *,
+    model_dir: str,
+    progress: bool = True,
+    file_name: str | None = None,
+) -> str:
+    """Download a file from `url` into `model_dir`, using the file present if possible.
+
+    Returns the path to the downloaded file.
+    """
+    os.makedirs(model_dir, exist_ok=True)
+    if not file_name:
+        parts = urlparse(url)
+        file_name = os.path.basename(parts.path)
+    cached_file = os.path.abspath(os.path.join(model_dir, file_name))
+    if not os.path.exists(cached_file):
+        print(f'Downloading: "{url}" to {cached_file}\n')
+        from torch.hub import download_url_to_file
+        download_url_to_file(url, cached_file, progress=progress)
+    return cached_file
+
+
 def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list:
     """
     A one-and done loader to try finding the desired models in specified directories.
@@ -40,16 +64,14 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list:
         if os.path.islink(full_path) and not os.path.exists(full_path):
             print(f"Skipping broken symlink: {full_path}")
             continue
-        if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]):
+        if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist):
             continue
         if full_path not in output:
             output.append(full_path)

     if model_url is not None and len(output) == 0:
         if download_name is not None:
-            from basicsr.utils.download_util import load_file_from_url
-            dl = load_file_from_url(model_url, model_path, True, download_name)
-            output.append(dl)
+            output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name))
         else:
             output.append(model_url)

@@ -60,7 +82,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list:
 def friendly_name(file: str):
-    if "http" in file:
+    if file.startswith("http"):
         file = urlparse(file).path

     file = os.path.basename(file)
@@ -96,8 +118,7 @@ def cleanup_models():

 def move_files(src_path: str, dest_path: str, ext_filter: str = None):
     try:
-        if not os.path.exists(dest_path):
-            os.makedirs(dest_path)
+        os.makedirs(dest_path, exist_ok=True)
         if os.path.exists(src_path):
             for file in os.listdir(src_path):
                 fullpath = os.path.join(src_path, file)
@@ -108,12 +129,12 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None):
                     print(f"Moving {file} from {src_path} to {dest_path}.")
                     try:
                         shutil.move(fullpath, dest_path)
-                    except:
+                    except Exception:
                         pass
             if len(os.listdir(src_path)) == 0:
                 print(f"Removing empty folder: {src_path}")
                 shutil.rmtree(src_path, True)
-    except:
+    except Exception:
         pass

@@ -127,7 +148,7 @@ def load_upscalers():
         full_model = f"modules.{model_name}_model"
         try:
             importlib.import_module(full_model)
-        except:
+        except Exception:
             pass

     datas = []
@@ -145,7 +166,10 @@ def load_upscalers():
     for cls in reversed(used_classes.values()):
         name = cls.__name__
         cmd_name = f"{name.lower().replace('upscaler', '')}_models_path"
-        scaler = cls(commandline_options.get(cmd_name, None))
+        commandline_model_path = commandline_options.get(cmd_name, None)
+        scaler = cls(commandline_model_path)
+        scaler.user_path = commandline_model_path
+        scaler.model_download_path = commandline_model_path or scaler.model_path
         datas += scaler.scalers

     shared.sd_upscalers = sorted(
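
Usage sketch for the new helper above (the URL and directory are placeholders, not real release assets):

    from modules.modelloader import load_file_from_url

    path = load_file_from_url(
        "https://example.com/models/ESRGAN_x4.pth",  # hypothetical URL
        model_dir="models/ESRGAN",
        file_name="ESRGAN_x4.pth",
    )
    print(path)  # absolute path; the download is skipped if the file already exists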

View File

@@ -52,7 +52,7 @@ class DDPM(pl.LightningModule):
                  beta_schedule="linear",
                  loss_type="l2",
                  ckpt_path=None,
-                 ignore_keys=[],
+                 ignore_keys=None,
                  load_only_unet=False,
                  monitor="val/loss",
                  use_ema=True,
@@ -107,7 +107,7 @@ class DDPM(pl.LightningModule):
             print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

         if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
+            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)

         # If initialing from EMA-only checkpoint, create EMA model after loading.
         if self.use_ema and not load_ema:
@@ -194,7 +194,9 @@ class DDPM(pl.LightningModule):
                 if context is not None:
                     print(f"{context}: Restored training weights")

-    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
+    def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
+        ignore_keys = ignore_keys or []
+
         sd = torch.load(path, map_location="cpu")
         if "state_dict" in list(sd.keys()):
             sd = sd["state_dict"]
@@ -228,9 +230,9 @@ class DDPM(pl.LightningModule):
         missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
             sd, strict=False)
         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
-        if len(missing) > 0:
+        if missing:
             print(f"Missing Keys: {missing}")
-        if len(unexpected) > 0:
+        if unexpected:
             print(f"Unexpected Keys: {unexpected}")

     def q_mean_variance(self, x_start, t):
@@ -403,7 +405,7 @@ class DDPM(pl.LightningModule):
     @torch.no_grad()
     def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
-        log = dict()
+        log = {}
         x = self.get_input(batch, self.first_stage_key)
         N = min(x.shape[0], N)
         n_row = min(x.shape[0], n_row)
@@ -411,7 +413,7 @@ class DDPM(pl.LightningModule):
         log["inputs"] = x

         # get diffusion row
-        diffusion_row = list()
+        diffusion_row = []
         x_start = x[:n_row]

         for t in range(self.num_timesteps):
@@ -473,13 +475,13 @@ class LatentDiffusion(DDPM):
             conditioning_key = None
         ckpt_path = kwargs.pop("ckpt_path", None)
         ignore_keys = kwargs.pop("ignore_keys", [])
-        super().__init__(conditioning_key=conditioning_key, *args, load_ema=load_ema, **kwargs)
+        super().__init__(*args, conditioning_key=conditioning_key, load_ema=load_ema, **kwargs)
         self.concat_mode = concat_mode
         self.cond_stage_trainable = cond_stage_trainable
         self.cond_stage_key = cond_stage_key
         try:
             self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
-        except:
+        except Exception:
             self.num_downs = 0
         if not scale_by_std:
             self.scale_factor = scale_factor
@@ -891,16 +893,6 @@ class LatentDiffusion(DDPM):
                 c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
         return self.p_losses(x, c, t, *args, **kwargs)

-    def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: move to dataset
-        def rescale_bbox(bbox):
-            x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
-            y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
-            w = min(bbox[2] / crop_coordinates[2], 1 - x0)
-            h = min(bbox[3] / crop_coordinates[3], 1 - y0)
-            return x0, y0, w, h
-
-        return [rescale_bbox(b) for b in bboxes]
-
     def apply_model(self, x_noisy, t, cond, return_ids=False):
         if isinstance(cond, dict):
@@ -1140,7 +1132,7 @@ class LatentDiffusion(DDPM):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
@@ -1171,8 +1163,10 @@ class LatentDiffusion(DDPM):
             if i % log_every_t == 0 or i == timesteps - 1:
                 intermediates.append(x0_partial)
-            if callback: callback(i)
-            if img_callback: img_callback(img, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(img, i)
         return img, intermediates

     @torch.no_grad()
@@ -1219,8 +1213,10 @@ class LatentDiffusion(DDPM):
             if i % log_every_t == 0 or i == timesteps - 1:
                 intermediates.append(img)
-            if callback: callback(i)
-            if img_callback: img_callback(img, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(img, i)

         if return_intermediates:
             return img, intermediates
@@ -1235,7 +1231,7 @@ class LatentDiffusion(DDPM):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
         return self.p_sample_loop(cond,
@@ -1267,7 +1263,7 @@ class LatentDiffusion(DDPM):
         use_ddim = False

-        log = dict()
+        log = {}
         z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                            return_first_stage_outputs=True,
                                            force_c_encode=True,
@@ -1295,7 +1291,7 @@ class LatentDiffusion(DDPM):
         if plot_diffusion_rows:
             # get diffusion row
-            diffusion_row = list()
+            diffusion_row = []
             z_start = z[:n_row]
             for t in range(self.num_timesteps):
                 if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
@@ -1337,7 +1333,7 @@ class LatentDiffusion(DDPM):
         if inpaint:
             # make a simple center square
-            b, h, w = z.shape[0], z.shape[2], z.shape[3]
+            h, w = z.shape[2], z.shape[3]
             mask = torch.ones(N, h, w).to(self.device)
             # zeros will be filled in
             mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
@@ -1439,10 +1435,10 @@ class Layout2ImgDiffusion(LatentDiffusion):
     # TODO: move all layout-specific hacks to this class
     def __init__(self, cond_stage_key, *args, **kwargs):
         assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
-        super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
+        super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)

     def log_images(self, batch, N=8, *args, **kwargs):
-        logs = super().log_images(batch=batch, N=N, *args, **kwargs)
+        logs = super().log_images(*args, batch=batch, N=N, **kwargs)
         key = 'train' if self.training else 'validation'
         dset = self.trainer.datamodule.datasets[key]

View File

@@ -1 +1 @@
-from .sampler import UniPCSampler
+from .sampler import UniPCSampler  # noqa: F401

View File

@@ -54,7 +54,8 @@ class UniPCSampler(object):
         if conditioning is not None:
             if isinstance(conditioning, dict):
                 ctmp = conditioning[list(conditioning.keys())[0]]
-                while isinstance(ctmp, list): ctmp = ctmp[0]
+                while isinstance(ctmp, list):
+                    ctmp = ctmp[0]
                 cbs = ctmp.shape[0]
                 if cbs != batch_size:
                     print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")

View File

@@ -1,7 +1,6 @@
 import torch
-import torch.nn.functional as F
 import math
-from tqdm.auto import trange
+import tqdm


 class NoiseScheduleVP:
@@ -179,13 +178,13 @@ def model_wrapper(
     model,
     noise_schedule,
     model_type="noise",
-    model_kwargs={},
+    model_kwargs=None,
     guidance_type="uncond",
     #condition=None,
     #unconditional_condition=None,
     guidance_scale=1.,
     classifier_fn=None,
-    classifier_kwargs={},
+    classifier_kwargs=None,
 ):
     """Create a wrapper function for the noise prediction model.
@@ -276,6 +275,9 @@ def model_wrapper(
         A noise prediction model that accepts the noised data and the continuous time as the inputs.
     """

+    model_kwargs = model_kwargs or {}
+    classifier_kwargs = classifier_kwargs or {}
+
     def get_model_input_time(t_continuous):
         """
         Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
@@ -342,7 +344,7 @@ def model_wrapper(
             t_in = torch.cat([t_continuous] * 2)
             if isinstance(condition, dict):
                 assert isinstance(unconditional_condition, dict)
-                c_in = dict()
+                c_in = {}
                 for k in condition:
                     if isinstance(condition[k], list):
                         c_in[k] = [torch.cat([
@@ -353,7 +355,7 @@ def model_wrapper(
                             unconditional_condition[k],
                             condition[k]])
             elif isinstance(condition, list):
-                c_in = list()
+                c_in = []
                 assert isinstance(unconditional_condition, list)
                 for i in range(len(condition)):
                     c_in.append(torch.cat([unconditional_condition[i], condition[i]]))
@@ -757,6 +759,7 @@ class UniPC:
             vec_t = timesteps[0].expand((x.shape[0]))
             model_prev_list = [self.model_fn(x, vec_t)]
             t_prev_list = [vec_t]
+            with tqdm.tqdm(total=steps) as pbar:
                 # Init the first `order` values by lower order multistep DPM-Solver.
                 for init_order in range(1, order):
                     vec_t = timesteps[init_order].expand(x.shape[0])
@@ -767,7 +770,9 @@ class UniPC:
                     self.after_update(x, model_x)
                     model_prev_list.append(model_x)
                     t_prev_list.append(vec_t)
+                    pbar.update()
-            for step in trange(order, steps + 1):
+                for step in range(order, steps + 1):
                     vec_t = timesteps[step].expand(x.shape[0])
                     if lower_order_final:
                         step_order = min(order, steps + 1 - step)
@@ -791,6 +796,7 @@ class UniPC:
                         if model_x is None:
                             model_x = self.model_fn(x, vec_t)
                         model_prev_list[-1] = model_x
+                    pbar.update()
         else:
             raise NotImplementedError()
         if denoise_to_zero:
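
The `model_kwargs={}` → `model_kwargs=None` changes above avoid Python's shared-mutable-default pitfall; a minimal self-contained illustration (names here are made up for the example):

    def bad(history=[]):        # one list object shared across every call
        history.append(1)
        return history

    def good(history=None):     # a fresh list per call
        history = history or []
        history.append(1)
        return history

    assert bad() == [1] and bad() == [1, 1]    # state leaks between calls
    assert good() == [1] and good() == [1]     # no leak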

View File

@@ -1,6 +1,7 @@
-from pyngrok import ngrok, conf, exception
+import ngrok

-def connect(token, port, region):
+# Connect to ngrok for ingress
+def connect(token, port, options):
     account = None
     if token is None:
         token = 'None'
@@ -10,28 +11,19 @@ def connect(token, port, options):
         token, username, password = token.split(':', 2)
         account = f"{username}:{password}"

-    config = conf.PyngrokConfig(
-        auth_token=token, region=region
-    )
-
-    # Guard for existing tunnels
-    existing = ngrok.get_tunnels(pyngrok_config=config)
-    if existing:
-        for established in existing:
-            # Extra configuration in the case that the user is also using ngrok for other tunnels
-            if established.config['addr'][-4:] == str(port):
-                public_url = existing[0].public_url
-                print(f'ngrok has already been connected to localhost:{port}! URL: {public_url}\n'
-                      'You can use this link after the launch is complete.')
-                return
-
+    # For all options see: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py
+    if not options.get('authtoken_from_env'):
+        options['authtoken'] = token
+    if account:
+        options['basic_auth'] = account
+    if not options.get('session_metadata'):
+        options['session_metadata'] = 'stable-diffusion-webui'
+
     try:
-        if account is None:
-            public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url
-        else:
-            public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True, auth=account).public_url
-    except exception.PyngrokNgrokError:
-        print(f'Invalid ngrok authtoken, ngrok connection aborted.\n'
+        public_url = ngrok.connect(f"127.0.0.1:{port}", **options).url()
+    except Exception as e:
+        print(f'Invalid ngrok authtoken? ngrok connection aborted due to: {e}\n'
              f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken')
     else:
         print(f'ngrok connected to localhost:{port}! URL: {public_url}\n'
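
A sketch of calling the rewritten helper (token and metadata are placeholders; the options dict is passed straight through to ngrok-py, as the diff above shows):

    options = {"session_metadata": "stable-diffusion-webui"}
    connect("<your-ngrok-authtoken>", 7860, options)
    # or, with HTTP basic auth baked into the token string:
    connect("<token>:user:password", 7860, {})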

View File

@@ -1,8 +1,8 @@
 import os
 import sys
-from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir
+from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir  # noqa: F401

-import modules.safe
+import modules.safe  # noqa: F401


 # data_path = cmd_opts_pre.data
@@ -20,7 +20,6 @@ assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possible_sd_paths}"

 path_dirs = [
     (sd_path, 'ldm', 'Stable Diffusion', []),
-    (os.path.join(sd_path, '../taming-transformers'), 'taming', 'Taming Transformers', []),
     (os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer', []),
     (os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []),
     (os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]),
@@ -39,17 +38,3 @@ for d, must_exist, what, options in path_dirs:
     else:
         sys.path.append(d)
         paths[what] = d
-
-
-class Prioritize:
-    def __init__(self, name):
-        self.name = name
-        self.path = None
-
-    def __enter__(self):
-        self.path = sys.path.copy()
-        sys.path = [paths[self.name]] + sys.path
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        sys.path = self.path
-        self.path = None

View File

@@ -2,8 +2,14 @@

 import argparse
 import os
+import sys
+import shlex

-script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
+sys.argv += shlex.split(commandline_args)
+
+modules_path = os.path.dirname(os.path.realpath(__file__))
+script_path = os.path.dirname(modules_path)

 sd_configs_path = os.path.join(script_path, "configs")
 sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml")
@@ -12,7 +18,7 @@ default_sd_model_file = sd_model_file

 # Parse the --data-dir flag first so we can use it as a base for our other argument default values
 parser_pre = argparse.ArgumentParser(add_help=False)
-parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored",)
+parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", )
 cmd_opts_pre = parser_pre.parse_known_args()[0]

 data_path = cmd_opts_pre.data_dir
@@ -21,3 +27,5 @@ models_path = os.path.join(data_path, "models")
 extensions_dir = os.path.join(data_path, "extensions")
 extensions_builtin_dir = os.path.join(script_path, "extensions-builtin")
 config_states_dir = os.path.join(script_path, "config_states")
+
+roboto_ttf_file = os.path.join(modules_path, 'Roboto-Regular.ttf')

View File

@@ -9,8 +9,7 @@ from modules.shared import opts

 def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True):
     devices.torch_gc()

-    shared.state.begin()
-    shared.state.job = 'extras'
+    shared.state.begin(job="extras")

     image_data = []
     image_names = []
@@ -54,7 +53,9 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True):
     for image, name in zip(image_data, image_names):
         shared.state.textinfo = name

-        existing_pnginfo = image.info or {}
+        parameters, existing_pnginfo = images.read_info_from_image(image)
+        if parameters:
+            existing_pnginfo["parameters"] = parameters

         pp = scripts_postprocessing.PostprocessedImage(image.convert("RGB"))

View File

@@ -1,20 +1,20 @@
 import json
+import logging
 import math
 import os
 import sys
-import warnings
 import hashlib

 import torch
 import numpy as np
-from PIL import Image, ImageFilter, ImageOps
+from PIL import Image, ImageOps
 import random
 import cv2
 from skimage import exposure
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List

 import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet
 from modules.sd_hijack import model_hijack
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
@@ -24,13 +24,13 @@ import modules.images as images
 import modules.styles
 import modules.sd_models as sd_models
 import modules.sd_vae as sd_vae
-import logging
 from ldm.data.util import AddMiDaS
 from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion

 from einops import repeat, rearrange
 from blendmodes.blend import blendLayers, BlendType

+
 # some of those options should not be changed at all because they would break the model, so I removed them from options.
 opt_C = 4
 opt_f = 8
@@ -106,6 +106,9 @@ class StableDiffusionProcessing:
     """
    The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
    """
+    cached_uc = [None, None]
+    cached_c = [None, None]
+
     def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
         if sampler_index is not None:
             print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
@@ -150,6 +153,8 @@ class StableDiffusionProcessing:
         self.override_settings_restore_afterwards = override_settings_restore_afterwards
         self.is_using_inpainting_conditioning = False
         self.disable_extra_networks = False
+        self.token_merging_ratio = 0
+        self.token_merging_ratio_hr = 0

         if not seed_enable_extras:
             self.subseed = -1
@@ -165,7 +170,21 @@ class StableDiffusionProcessing:
         self.all_subseeds = None
         self.iteration = 0
         self.is_hr_pass = False
+        self.sampler = None

+        self.prompts = None
+        self.negative_prompts = None
+        self.extra_network_data = None
+        self.seeds = None
+        self.subseeds = None
+
+        self.step_multiplier = 1
+        self.cached_uc = StableDiffusionProcessing.cached_uc
+        self.cached_c = StableDiffusionProcessing.cached_c
+        self.uc = None
+        self.c = None
+
+        self.user = None

     @property
     def sd_model(self):
@@ -273,6 +292,64 @@ class StableDiffusionProcessing:

     def close(self):
         self.sampler = None
+        self.c = None
+        self.uc = None
+        if not opts.experimental_persistent_cond_cache:
+            StableDiffusionProcessing.cached_c = [None, None]
+            StableDiffusionProcessing.cached_uc = [None, None]
+
+    def get_token_merging_ratio(self, for_hr=False):
+        if for_hr:
+            return self.token_merging_ratio_hr or opts.token_merging_ratio_hr or self.token_merging_ratio or opts.token_merging_ratio
+
+        return self.token_merging_ratio or opts.token_merging_ratio
+
+    def setup_prompts(self):
+        if type(self.prompt) == list:
+            self.all_prompts = self.prompt
+        else:
+            self.all_prompts = self.batch_size * self.n_iter * [self.prompt]
+
+        if type(self.negative_prompt) == list:
+            self.all_negative_prompts = self.negative_prompt
+        else:
+            self.all_negative_prompts = self.batch_size * self.n_iter * [self.negative_prompt]
+
+        self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
+        self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]
+
+    def get_conds_with_caching(self, function, required_prompts, steps, caches, extra_network_data):
+        """
+        Returns the result of calling function(shared.sd_model, required_prompts, steps)
+        using a cache to store the result if the same arguments have been used before.
+
+        cache is an array containing two elements. The first element is a tuple
+        representing the previously used arguments, or None if no arguments
+        have been used before. The second element is where the previously
+        computed result is stored.
+
+        caches is a list with items described above.
+        """
+        for cache in caches:
+            if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info, extra_network_data) == cache[0]:
+                return cache[1]
+
+        cache = caches[0]
+
+        with devices.autocast():
+            cache[1] = function(shared.sd_model, required_prompts, steps)
+
+        cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info, extra_network_data)
+        return cache[1]
+
+    def setup_conds(self):
+        sampler_config = sd_samplers.find_sampler_config(self.sampler_name)
+        self.step_multiplier = 2 if sampler_config and sampler_config.options.get("second_order", False) else 1
+        self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data)
+        self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.prompts, self.steps * self.step_multiplier, [self.cached_c], self.extra_network_data)
+
+    def parse_extra_network_prompts(self):
+        self.prompts, self.extra_network_data = extra_networks.parse_prompts(self.prompts)
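
A stripped-down sketch of the cache contract documented above: element 0 holds the key tuple of the previous call, element 1 the computed conditionings (names here are illustrative, not the actual method):

    cache = [None, None]

    def cached_call(fn, key):
        if cache[0] == key:          # same prompts/steps/options as last time
            return cache[1]
        cache[1] = fn(key)           # recompute and remember
        cache[0] = key
        return cache[1]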
 class Processed:
@@ -303,6 +380,8 @@ class Processed:
         self.styles = p.styles
         self.job_timestamp = state.job_timestamp
         self.clip_skip = opts.CLIP_stop_at_last_layers
+        self.token_merging_ratio = p.token_merging_ratio
+        self.token_merging_ratio_hr = p.token_merging_ratio_hr

         self.eta = p.eta
         self.ddim_discretize = p.ddim_discretize
@@ -310,6 +389,7 @@ class Processed:
         self.s_tmin = p.s_tmin
         self.s_tmax = p.s_tmax
         self.s_noise = p.s_noise
+        self.s_min_uncond = p.s_min_uncond
         self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
         self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
         self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
@@ -360,6 +440,9 @@ class Processed:
     def infotext(self, p: StableDiffusionProcessing, index):
         return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)

+    def get_token_merging_ratio(self, for_hr=False):
+        return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio
+

 # from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
 def slerp(val, low, high):
@@ -468,10 +551,17 @@ def program_version():
     return res


-def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0):
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False):
     index = position_in_batch + iteration * p.batch_size

     clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
+    enable_hr = getattr(p, 'enable_hr', False)
+    token_merging_ratio = p.get_token_merging_ratio()
+    token_merging_ratio_hr = p.get_token_merging_ratio(for_hr=True)
+
+    uses_ensd = opts.eta_noise_seed_delta != 0
+    if uses_ensd:
+        uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p)

     generation_params = {
         "Steps": p.steps,
@@ -485,27 +575,33 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False):
         "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
         "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
         "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
-        "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
+        "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
         "Denoising strength": getattr(p, 'denoising_strength', None),
         "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
         "Clip skip": None if clip_skip <= 1 else clip_skip,
-        "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
+        "ENSD": opts.eta_noise_seed_delta if uses_ensd else None,
+        "Token merging ratio": None if token_merging_ratio == 0 else token_merging_ratio,
+        "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr,
         "Init image hash": getattr(p, 'init_img_hash', None),
         "RNG": opts.randn_source if opts.randn_source != "GPU" else None,
         "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
+        **p.extra_generation_params,
         "Version": program_version() if opts.add_version_to_infotext else None,
+        "User": p.user if opts.add_user_name_to_info else None,
     }

-    generation_params.update(p.extra_generation_params)
-
     generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])

+    prompt_text = p.prompt if use_main_prompt else all_prompts[index]
     negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else ""

-    return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()
+    return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
def process_images(p: StableDiffusionProcessing) -> Processed: def process_images(p: StableDiffusionProcessing) -> Processed:
if p.scripts is not None:
p.scripts.before_process(p)
stored_opts = {k: opts.data[k] for k in p.override_settings.keys()} stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}
try: try:
@ -523,9 +619,13 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if k == 'sd_vae': if k == 'sd_vae':
sd_vae.reload_vae_weights() sd_vae.reload_vae_weights()
sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())
res = process_images_inner(p) res = process_images_inner(p)
finally: finally:
sd_models.apply_token_merging(p.sd_model, 0)
# restore opts to original state # restore opts to original state
if p.override_settings_restore_afterwards: if p.override_settings_restore_afterwards:
for k, v in stored_opts.items(): for k, v in stored_opts.items():
@ -555,15 +655,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
comments = {} comments = {}
if type(p.prompt) == list: p.setup_prompts()
p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.prompt]
else:
p.all_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_styles_to_prompt(p.prompt, p.styles)]
if type(p.negative_prompt) == list:
p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in p.negative_prompt]
else:
p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)]
if type(seed) == list: if type(seed) == list:
p.all_seeds = seed p.all_seeds = seed
@ -575,8 +667,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
else: else:
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))] p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
def infotext(iteration=0, position_in_batch=0): def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch) return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch, use_main_prompt)
if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings: if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
model_hijack.embedding_db.load_textual_inversion_embeddings() model_hijack.embedding_db.load_textual_inversion_embeddings()
@ -587,29 +679,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
infotexts = [] infotexts = []
output_images = [] output_images = []
cached_uc = [None, None]
cached_c = [None, None]
def get_conds_with_caching(function, required_prompts, steps, cache):
"""
Returns the result of calling function(shared.sd_model, required_prompts, steps)
using a cache to store the result if the same arguments have been used before.
cache is an array containing two elements. The first element is a tuple
representing the previously used arguments, or None if no arguments
have been used before. The second element is where the previously
computed result is stored.
"""
if cache[0] is not None and (required_prompts, steps) == cache[0]:
return cache[1]
with devices.autocast():
cache[1] = function(shared.sd_model, required_prompts, steps)
cache[0] = (required_prompts, steps)
return cache[1]
with torch.no_grad(), p.sd_model.ema_scope(): with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast(): with devices.autocast():
p.init(p.all_prompts, p.all_seeds, p.all_subseeds) p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
@ -618,10 +687,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN": if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
sd_vae_approx.model() sd_vae_approx.model()
sd_unet.apply_unet()
if state.job_count == -1: if state.job_count == -1:
state.job_count = p.n_iter state.job_count = p.n_iter
extra_network_data = None
for n in range(p.n_iter): for n in range(p.n_iter):
p.iteration = n p.iteration = n
@ -631,25 +701,25 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if state.interrupted: if state.interrupted:
break break
prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size] p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size] p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
if p.scripts is not None: if p.scripts is not None:
p.scripts.before_process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
if len(prompts) == 0: if len(p.prompts) == 0:
break break
prompts, extra_network_data = extra_networks.parse_prompts(prompts) p.parse_extra_network_prompts()
if not p.disable_extra_networks: if not p.disable_extra_networks:
with devices.autocast(): with devices.autocast():
extra_networks.activate(p, extra_network_data) extra_networks.activate(p, p.extra_network_data)
if p.scripts is not None: if p.scripts is not None:
p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) p.scripts.process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)
# params.txt should be saved after scripts.process_batch, since the # params.txt should be saved after scripts.process_batch, since the
# infotext could be modified by that callback # infotext could be modified by that callback
@ -660,14 +730,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
processed = Processed(p, [], p.seed, "") processed = Processed(p, [], p.seed, "")
file.write(processed.infotext(p, 0)) file.write(processed.infotext(p, 0))
step_multiplier = 1 p.setup_conds()
if not shared.opts.dont_fix_second_order_samplers_schedule:
try:
step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1
except:
pass
uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc)
c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c)
if len(model_hijack.comments) > 0: if len(model_hijack.comments) > 0:
for comment in model_hijack.comments: for comment in model_hijack.comments:
@ -677,7 +740,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
shared.state.job = f"Batch {n+1} out of {p.n_iter}" shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))] x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))]
for x in x_samples_ddim: for x in x_samples_ddim:
@ -688,7 +751,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
del samples_ddim del samples_ddim
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: if lowvram.is_enabled(shared.sd_model):
lowvram.send_everything_to_cpu() lowvram.send_everything_to_cpu()
devices.torch_gc() devices.torch_gc()
@@ -704,7 +767,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                 if p.restore_faces:
                     if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
-                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
+                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")

                     devices.torch_gc()

@@ -721,13 +784,13 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                 if p.color_corrections is not None and i < len(p.color_corrections):
                     if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
                         image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
-                        images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
+                        images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
                     image = apply_color_correction(p.color_corrections[i], image)

                 image = apply_overlay(image, p.paste_to, i, p.overlay_images)

                 if opts.samples_save and not p.do_not_save_samples:
-                    images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
+                    images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p)

                 text = infotext(n, i)
                 infotexts.append(text)

@@ -740,10 +803,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                     image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')

                     if opts.save_mask:
-                        images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")
+                        images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")

                     if opts.save_mask_composite:
-                        images.save_image(image_mask_composite, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")
+                        images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")

                     if opts.return_mask:
                         output_images.append(image_mask)

@@ -765,7 +828,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             grid = images.image_grid(output_images, p.batch_size)

             if opts.return_grid:
-                text = infotext()
+                text = infotext(use_main_prompt=True)
                 infotexts.insert(0, text)
                 if opts.enable_pnginfo:
                     grid.info["parameters"] = text

@@ -773,10 +836,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
                 index_of_first_image = 1

             if opts.grid_save:
-                images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
+                images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(use_main_prompt=True), short_filename=not opts.grid_extended_filename, p=p, grid=True)

-    if not p.disable_extra_networks and extra_network_data:
-        extra_networks.deactivate(p, extra_network_data)
+    if not p.disable_extra_networks and p.extra_network_data:
+        extra_networks.deactivate(p, p.extra_network_data)

     devices.torch_gc()

@@ -785,7 +848,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         images_list=output_images,
         seed=p.all_seeds[0],
         info=infotext(),
-        comments="".join(f"\n\n{comment}" for comment in comments),
+        comments="".join(f"{comment}\n" for comment in comments),
         subseed=p.all_subseeds[0],
         index_of_first_image=index_of_first_image,
         infotexts=infotexts,
@@ -811,8 +874,10 @@ def old_hires_fix_first_pass_dimensions(width, height):

 class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
     sampler = None
+    cached_hr_uc = [None, None]
+    cached_hr_c = [None, None]

-    def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, **kwargs):
+    def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, hr_sampler_name: str = None, hr_prompt: str = '', hr_negative_prompt: str = '', **kwargs):
         super().__init__(**kwargs)
         self.enable_hr = enable_hr
         self.denoising_strength = denoising_strength

@@ -823,6 +888,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         self.hr_resize_y = hr_resize_y
         self.hr_upscale_to_x = hr_resize_x
         self.hr_upscale_to_y = hr_resize_y
+        self.hr_sampler_name = hr_sampler_name
+        self.hr_prompt = hr_prompt
+        self.hr_negative_prompt = hr_negative_prompt
+        self.all_hr_prompts = None
+        self.all_hr_negative_prompts = None

         if firstphase_width != 0 or firstphase_height != 0:
             self.hr_upscale_to_x = self.width

@@ -834,8 +904,26 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         self.truncate_y = 0
         self.applied_old_hires_behavior_to = None

+        self.hr_prompts = None
+        self.hr_negative_prompts = None
+        self.hr_extra_network_data = None
+
+        self.cached_hr_uc = StableDiffusionProcessingTxt2Img.cached_hr_uc
+        self.cached_hr_c = StableDiffusionProcessingTxt2Img.cached_hr_c
+        self.hr_c = None
+        self.hr_uc = None
+
     def init(self, all_prompts, all_seeds, all_subseeds):
         if self.enable_hr:
+            if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
+                self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
+
+            if tuple(self.hr_prompt) != tuple(self.prompt):
+                self.extra_generation_params["Hires prompt"] = self.hr_prompt
+
+            if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
+                self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt
+
             if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
                 self.hr_resize_x = self.width
                 self.hr_resize_y = self.height

@@ -901,7 +989,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
         if self.enable_hr and latent_scale_mode is None:
-            assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}"
+            if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
+                raise Exception(f"could not find upscaler named {self.hr_upscaler}")

         x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
         samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))

@@ -965,9 +1054,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         shared.state.nextjob()

-        img2img_sampler_name = self.sampler_name
+        img2img_sampler_name = self.hr_sampler_name or self.sampler_name
+
         if self.sampler_name in ['PLMS', 'UniPC']:  # PLMS/UniPC do not support img2img so we just silently switch to DDIM
             img2img_sampler_name = 'DDIM'
+
         self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)

         samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
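The hires pass now honors a dedicated hr_sampler_name, falling back to the first-pass sampler, and still silently substitutes DDIM when the first pass used PLMS or UniPC, since neither supports img2img. Condensed into a standalone helper (pick_img2img_sampler is a hypothetical name for illustration):

def pick_img2img_sampler(hr_sampler_name: str, sampler_name: str) -> str:
    name = hr_sampler_name or sampler_name  # explicit hires choice wins
    if sampler_name in ('PLMS', 'UniPC'):  # first-pass samplers without img2img support
        name = 'DDIM'
    return name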
@@ -978,17 +1069,101 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         x = None
         devices.torch_gc()

-        samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
+        if not self.disable_extra_networks:
+            with devices.autocast():
+                extra_networks.activate(self, self.hr_extra_network_data)
+
+        with devices.autocast():
+            self.calculate_hr_conds()
+
+        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio(for_hr=True))
+
+        if self.scripts is not None:
+            self.scripts.before_hr(self)
+
+        samples = self.sampler.sample_img2img(self, samples, noise, self.hr_c, self.hr_uc, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)
+
+        sd_models.apply_token_merging(self.sd_model, self.get_token_merging_ratio())

         self.is_hr_pass = False

         return samples

+    def close(self):
+        super().close()
+
+        self.hr_c = None
+        self.hr_uc = None
+        if not opts.experimental_persistent_cond_cache:
+            StableDiffusionProcessingTxt2Img.cached_hr_uc = [None, None]
+            StableDiffusionProcessingTxt2Img.cached_hr_c = [None, None]
+
+    def setup_prompts(self):
+        super().setup_prompts()
+
+        if not self.enable_hr:
+            return
+
+        if self.hr_prompt == '':
+            self.hr_prompt = self.prompt
+
+        if self.hr_negative_prompt == '':
+            self.hr_negative_prompt = self.negative_prompt
+
+        if type(self.hr_prompt) == list:
+            self.all_hr_prompts = self.hr_prompt
+        else:
+            self.all_hr_prompts = self.batch_size * self.n_iter * [self.hr_prompt]
+
+        if type(self.hr_negative_prompt) == list:
+            self.all_hr_negative_prompts = self.hr_negative_prompt
+        else:
+            self.all_hr_negative_prompts = self.batch_size * self.n_iter * [self.hr_negative_prompt]
+
+        self.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_hr_prompts]
+        self.all_hr_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_hr_negative_prompts]
+
+    def calculate_hr_conds(self):
+        if self.hr_c is not None:
+            return
+
+        self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, self.hr_negative_prompts, self.steps * self.step_multiplier, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data)
+        self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, self.hr_prompts, self.steps * self.step_multiplier, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data)
+
+    def setup_conds(self):
+        super().setup_conds()
+
+        self.hr_uc = None
+        self.hr_c = None
+
+        if self.enable_hr:
+            if shared.opts.hires_fix_use_firstpass_conds:
+                self.calculate_hr_conds()
+
+            elif lowvram.is_enabled(shared.sd_model):  # if in lowvram mode, we need to calculate conds right away, before the cond NN is unloaded
+                with devices.autocast():
+                    extra_networks.activate(self, self.hr_extra_network_data)
+
+                self.calculate_hr_conds()
+
+                with devices.autocast():
+                    extra_networks.activate(self, self.extra_network_data)
+
+    def parse_extra_network_prompts(self):
+        res = super().parse_extra_network_prompts()
+
+        if self.enable_hr:
+            self.hr_prompts = self.all_hr_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size]
+            self.hr_negative_prompts = self.all_hr_negative_prompts[self.iteration * self.batch_size:(self.iteration + 1) * self.batch_size]
+
+            self.hr_prompts, self.hr_extra_network_data = extra_networks.parse_prompts(self.hr_prompts)
+
+        return res
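The new cached_hr_uc/cached_hr_c pairs live on the class, so they survive between jobs, and calculate_hr_conds hands get_conds_with_caching both the hires cache and the first-pass cache so either can satisfy a hit. A minimal sketch of that [key, value] caching idea, simplified from the real method (whose key also covers extra-network data):

def conds_with_caching(function, prompts, steps, cache):
    # cache is a two-element list [key, value], like the [None, None]
    # class-level caches above; a matching key skips the costly
    # text-encoder forward pass.
    key = (tuple(prompts), steps)
    if cache[0] == key:
        return cache[1]
    cache[1] = function(prompts, steps)
    cache[0] = key
    return cache[1]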

 class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
     sampler = None

-    def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
+    def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = None, mask_blur_x: int = 4, mask_blur_y: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
         super().__init__(**kwargs)

         self.init_images = init_images

@@ -999,7 +1174,11 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.image_mask = mask
         self.latent_mask = None
         self.mask_for_overlay = None
-        self.mask_blur = mask_blur
+        if mask_blur is not None:
+            mask_blur_x = mask_blur
+            mask_blur_y = mask_blur
+        self.mask_blur_x = mask_blur_x
+        self.mask_blur_y = mask_blur_y
         self.inpainting_fill = inpainting_fill
         self.inpaint_full_res = inpaint_full_res
         self.inpaint_full_res_padding = inpaint_full_res_padding

@@ -1021,8 +1200,17 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
             if self.inpainting_mask_invert:
                 image_mask = ImageOps.invert(image_mask)

-            if self.mask_blur > 0:
-                image_mask = image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
+            if self.mask_blur_x > 0:
+                np_mask = np.array(image_mask)
+                kernel_size = 2 * int(4 * self.mask_blur_x + 0.5) + 1
+                np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), self.mask_blur_x)
+                image_mask = Image.fromarray(np_mask)
+            if self.mask_blur_y > 0:
+                np_mask = np.array(image_mask)
+                kernel_size = 2 * int(4 * self.mask_blur_y + 0.5) + 1
+                np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y)
+                image_mask = Image.fromarray(np_mask)

             if self.inpaint_full_res:
                 self.mask_for_overlay = image_mask
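Replacing PIL's single GaussianBlur with two cv2.GaussianBlur passes, one with a (k, 1) kernel and one with (1, k), matches a full 2-D Gaussian when the sigmas are equal, but lets the mask be feathered by different amounts horizontally and vertically. The kernel width 2 * int(4 * sigma + 0.5) + 1 is the usual rule of thumb: an odd size spanning roughly +/-4 sigma. A self-contained sketch of the same two-pass blur:

import cv2
import numpy as np

def blur_mask(mask: np.ndarray, sigma_x: float, sigma_y: float) -> np.ndarray:
    # Each pass blurs along one axis only, because the other kernel dimension is 1.
    if sigma_x > 0:
        k = 2 * int(4 * sigma_x + 0.5) + 1  # odd width covering about +/-4 sigma
        mask = cv2.GaussianBlur(mask, (k, 1), sigma_x)
    if sigma_y > 0:
        k = 2 * int(4 * sigma_y + 0.5) + 1
        mask = cv2.GaussianBlur(mask, (1, k), sigma_y)
    return mask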
@@ -1141,3 +1329,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         devices.torch_gc()

         return samples
+
+    def get_token_merging_ratio(self, for_hr=False):
+        return self.token_merging_ratio or ("token_merging_ratio" in self.override_settings and opts.token_merging_ratio) or opts.token_merging_ratio_img2img or opts.token_merging_ratio

--- a/modules/progress.py
+++ b/modules/progress.py
@@ -95,9 +95,20 @@ def progressapi(req: ProgressRequest):
             image = shared.state.current_image
             if image is not None:
                 buffered = io.BytesIO()
-                image.save(buffered, format="png")
+
+                if opts.live_previews_image_format == "png":
+                    # using optimize for large images takes an enormous amount of time
+                    if max(*image.size) <= 256:
+                        save_kwargs = {"optimize": True}
+                    else:
+                        save_kwargs = {"optimize": False, "compress_level": 1}
+
+                else:
+                    save_kwargs = {}
+
+                image.save(buffered, format=opts.live_previews_image_format, **save_kwargs)
                 base64_image = base64.b64encode(buffered.getvalue()).decode('ascii')
-                live_preview = f"data:image/png;base64,{base64_image}"
+                live_preview = f"data:image/{opts.live_previews_image_format};base64,{base64_image}"
                 id_live_preview = shared.state.id_live_preview
             else:
                 live_preview = None
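The preview encoder now respects opts.live_previews_image_format and only pays for PNG's optimize pass on small images; larger ones use compress_level=1, trading a few bytes for much faster encoding, and the data: URL's MIME type follows the chosen format. The same logic restated as a self-contained function (encode_live_preview is an illustrative name):

import base64
import io

from PIL import Image

def encode_live_preview(image: Image.Image, image_format: str = "png") -> str:
    buffered = io.BytesIO()
    if image_format == "png":
        # optimize=True is very slow on large images; fall back to fast compression
        if max(*image.size) <= 256:
            save_kwargs = {"optimize": True}
        else:
            save_kwargs = {"optimize": False, "compress_level": 1}
    else:
        save_kwargs = {}
    image.save(buffered, format=image_format, **save_kwargs)
    b64 = base64.b64encode(buffered.getvalue()).decode('ascii')
    return f"data:image/{image_format};base64,{b64}"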

--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -54,18 +54,21 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
""" """
def collect_steps(steps, tree): def collect_steps(steps, tree):
l = [steps] res = [steps]
class CollectSteps(lark.Visitor): class CollectSteps(lark.Visitor):
def scheduled(self, tree): def scheduled(self, tree):
tree.children[-1] = float(tree.children[-1]) tree.children[-1] = float(tree.children[-1])
if tree.children[-1] < 1: if tree.children[-1] < 1:
tree.children[-1] *= steps tree.children[-1] *= steps
tree.children[-1] = min(steps, int(tree.children[-1])) tree.children[-1] = min(steps, int(tree.children[-1]))
l.append(tree.children[-1]) res.append(tree.children[-1])
def alternate(self, tree): def alternate(self, tree):
l.extend(range(1, steps+1)) res.extend(range(1, steps+1))
CollectSteps().visit(tree) CollectSteps().visit(tree)
return sorted(set(l)) return sorted(set(res))
def at_step(step, tree): def at_step(step, tree):
class AtStep(lark.Transformer): class AtStep(lark.Transformer):
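Besides renaming l to res (an ambiguous single-letter name flagged by linters), this hunk shows the scheduling rule: a boundary below 1 is a fraction of the total step count, anything else is an absolute step, and both are clamped to the final step. Extracted for illustration:

def to_absolute_step(value: float, steps: int) -> int:
    # e.g. [cat:dog:0.5] at 20 steps switches at step 10;
    # [cat:dog:25] at 20 steps is clamped to step 20.
    if value < 1:
        value *= steps
    return min(steps, int(value))

assert to_absolute_step(0.5, 20) == 10
assert to_absolute_step(15, 20) == 15
assert to_absolute_step(25, 20) == 20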
@@ -92,7 +95,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
     def get_schedule(prompt):
         try:
             tree = schedule_parser.parse(prompt)
-        except lark.exceptions.LarkError as e:
+        except lark.exceptions.LarkError:
             if 0:
                 import traceback
                 traceback.print_exc()
@@ -140,7 +143,7 @@ def get_learned_conditioning(model, prompts, steps):
         conds = model.get_learned_conditioning(texts)

         cond_schedule = []
-        for i, (end_at_step, text) in enumerate(prompt_schedule):
+        for i, (end_at_step, _) in enumerate(prompt_schedule):
             cond_schedule.append(ScheduledPromptConditioning(end_at_step, conds[i]))

         cache[prompt] = cond_schedule
@@ -216,8 +219,8 @@ def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step):
     res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
     for i, cond_schedule in enumerate(c):
         target_index = 0
-        for current, (end_at, cond) in enumerate(cond_schedule):
-            if current_step <= end_at:
+        for current, entry in enumerate(cond_schedule):
+            if current_step <= entry.end_at_step:
                 target_index = current
                 break
         res[i] = cond_schedule[target_index].cond
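Switching from tuple unpacking to entry.end_at_step means ScheduledPromptConditioning (a namedtuple) can gain fields later without breaking every loop that iterates a schedule. The selection itself just takes the first entry whose end step has not yet passed, defaulting to the first entry:

from collections import namedtuple

ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])

def pick_schedule_index(schedule, current_step):
    # First entry still in effect at current_step; index 0 if none match.
    for i, entry in enumerate(schedule):
        if current_step <= entry.end_at_step:
            return i
    return 0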
@@ -231,13 +234,13 @@ def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step):
     tensors = []
     conds_list = []

-    for batch_no, composable_prompts in enumerate(c.batch):
+    for composable_prompts in c.batch:
         conds_for_batch = []

-        for cond_index, composable_prompt in enumerate(composable_prompts):
+        for composable_prompt in composable_prompts:
             target_index = 0
-            for current, (end_at, cond) in enumerate(composable_prompt.schedules):
-                if current_step <= end_at:
+            for current, entry in enumerate(composable_prompt.schedules):
+                if current_step <= entry.end_at_step:
                     target_index = current
                     break
@@ -333,11 +336,11 @@ def parse_prompt_attention(text):
             round_brackets.append(len(res))
         elif text == '[':
             square_brackets.append(len(res))
-        elif weight is not None and len(round_brackets) > 0:
+        elif weight is not None and round_brackets:
             multiply_range(round_brackets.pop(), float(weight))
-        elif text == ')' and len(round_brackets) > 0:
+        elif text == ')' and round_brackets:
             multiply_range(round_brackets.pop(), round_bracket_multiplier)
-        elif text == ']' and len(square_brackets) > 0:
+        elif text == ']' and square_brackets:
             multiply_range(square_brackets.pop(), square_bracket_multiplier)
         else:
             parts = re.split(re_break, text)
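Dropping len(...) > 0 in favor of plain truthiness is idiomatic Python and behaves identically for lists. For reference, the function's behavior with the default multipliers (1.1 for round brackets, 1/1.1 for square ones), matching the examples in its own docstring:

# parse_prompt_attention('(abc)')      -> [['abc', 1.1]]
# parse_prompt_attention('(abc:3.12)') -> [['abc', 3.12]]
# parse_prompt_attention('[abc]')      -> [['abc', 0.909...]]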

--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -1,15 +1,13 @@
 import os
-import sys
-import traceback

 import numpy as np
 from PIL import Image
-from basicsr.utils.download_util import load_file_from_url
 from realesrgan import RealESRGANer

 from modules.upscaler import Upscaler, UpscalerData
 from modules.shared import cmd_opts, opts
-from modules import modelloader
+from modules import modelloader, errors


 class UpscalerRealESRGAN(Upscaler):
     def __init__(self, path):
@@ -17,9 +15,9 @@ class UpscalerRealESRGAN(Upscaler):
         self.user_path = path
         super().__init__()
         try:
-            from basicsr.archs.rrdbnet_arch import RRDBNet
-            from realesrgan import RealESRGANer
-            from realesrgan.archs.srvgg_arch import SRVGGNetCompact
+            from basicsr.archs.rrdbnet_arch import RRDBNet  # noqa: F401
+            from realesrgan import RealESRGANer  # noqa: F401
+            from realesrgan.archs.srvgg_arch import SRVGGNetCompact  # noqa: F401
             self.enable = True
             self.scalers = []
             scalers = self.load_models(path)
@ -36,8 +34,7 @@ class UpscalerRealESRGAN(Upscaler):
self.scalers.append(scaler) self.scalers.append(scaler)
except Exception: except Exception:
print("Error importing Real-ESRGAN:", file=sys.stderr) errors.report("Error importing Real-ESRGAN", exc_info=True)
print(traceback.format_exc(), file=sys.stderr)
self.enable = False self.enable = False
self.scalers = [] self.scalers = []
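errors.report collapses the old two-line print-plus-traceback pattern into one call. A sketch of the helper's assumed shape (the real modules/errors.py may do more, such as de-duplicating repeated reports):

import sys
import traceback

def report(message: str, *, exc_info: bool = False) -> None:
    # One call replaces print(msg) + print(traceback.format_exc()).
    print(message, file=sys.stderr)
    if exc_info:
        print(traceback.format_exc(), file=sys.stderr)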
@@ -45,9 +42,10 @@ class UpscalerRealESRGAN(Upscaler):
         if not self.enable:
             return img

-        info = self.load_model(path)
-        if not os.path.exists(info.local_data_path):
-            print(f"Unable to load RealESRGAN model: {info.name}")
+        try:
+            info = self.load_model(path)
+        except Exception:
+            errors.report(f"Unable to load RealESRGAN model {path}", exc_info=True)
             return img

         upsampler = RealESRGANer(
@@ -65,21 +63,17 @@ class UpscalerRealESRGAN(Upscaler):
         return image

     def load_model(self, path):
-        try:
-            info = next(iter([scaler for scaler in self.scalers if scaler.data_path == path]), None)
-
-            if info is None:
-                print(f"Unable to find model info: {path}")
-                return None
-
-            if info.local_data_path.startswith("http"):
-                info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_path, progress=True)
-
-            return info
-        except Exception as e:
-            print(f"Error making Real-ESRGAN models list: {e}", file=sys.stderr)
-            print(traceback.format_exc(), file=sys.stderr)
-        return None
+        for scaler in self.scalers:
+            if scaler.data_path == path:
+                if scaler.local_data_path.startswith("http"):
+                    scaler.local_data_path = modelloader.load_file_from_url(
+                        scaler.data_path,
+                        model_dir=self.model_download_path,
+                    )
+                if not os.path.exists(scaler.local_data_path):
+                    raise FileNotFoundError(f"RealESRGAN data missing: {scaler.local_data_path}")
+                return scaler
+        raise ValueError(f"Unable to find model info: {path}")

     def load_models(self, _):
         return get_realesrgan_models(self)
@@ -134,6 +128,5 @@ def get_realesrgan_models(scaler):
             ),
         ]
         return models
-    except Exception as e:
-        print("Error making Real-ESRGAN models list:", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
+    except Exception:
+        errors.report("Error making Real-ESRGAN models list", exc_info=True)

--- /dev/null
+++ b/modules/restart.py
@@ -0,0 +1,23 @@
+import os
+from pathlib import Path
+
+from modules.paths_internal import script_path
+
+
+def is_restartable() -> bool:
+    """
+    Return True if the webui is restartable (i.e. there is something watching to restart it with)
+    """
+    return bool(os.environ.get('SD_WEBUI_RESTART'))
+
+
+def restart_program() -> None:
+    """creates file tmp/restart and immediately stops the process, which webui.bat/webui.sh interpret as a command to start webui again"""
+
+    (Path(script_path) / "tmp" / "restart").touch()
+
+    stop_program()
+
+
+def stop_program() -> None:
+    os._exit(0)
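restart_program only works if something outside the process relaunches it: the launcher exports SD_WEBUI_RESTART and loops, treating a tmp/restart marker file as a request to start again. A hedged Python rendering of that watcher loop (the actual webui.bat/webui.sh implement it in shell):

import os
import subprocess
from pathlib import Path

os.environ["SD_WEBUI_RESTART"] = "1"  # makes is_restartable() return True in the child
marker = Path("tmp") / "restart"
while True:
    subprocess.run(["python", "launch.py"])
    if not marker.exists():  # normal exit: no relaunch requested
        break
    marker.unlink()  # consume the marker and start again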
