Compare commits

...

589 Commits

Author SHA1 Message Date
AUTOMATIC
22bcc7be42 attempted fix for infinite loading for settings that some people experience 2023-03-29 08:58:29 +03:00
AUTOMATIC
3856ada5cc do not add mask blur to infotext if there is no mask 2023-03-28 22:20:31 +03:00
AUTOMATIC
433b3ab701 Revert "Merge pull request #7931 from space-nuko/img2img-enhance"
This reverts commit 4268759370, reversing
changes made to 1b63afbedc.
2023-03-28 20:36:57 +03:00
AUTOMATIC1111
4268759370
Merge pull request #7931 from space-nuko/img2img-enhance
Add `Upscale by` and `Upscaler` options to img2img
2023-03-28 20:21:25 +03:00
AUTOMATIC
1b63afbedc sort hypernetworks and checkpoints by name 2023-03-28 20:03:57 +03:00
AUTOMATIC1111
8c69bd08c5
Merge pull request #9065 from missionfloyd/quicksettings-alignment
Fix quicksettings alignment
2023-03-28 19:53:30 +03:00
AUTOMATIC1111
daa5a83bb7
Merge pull request #9052 from space-nuko/temp-disable-extensions
Temporary disable extensions option
2023-03-28 19:42:09 +03:00
AUTOMATIC1111
cb3e1ba9bd
Merge pull request #9061 from zetclansu/patch-1
fix [Bug]: very wide image doesn't fit in inpaint. Update style.css
2023-03-28 19:40:52 +03:00
AUTOMATIC1111
f1db987e6a
Merge pull request #8958 from MrCheeze/variations-model
Add support for the unclip (Variations) models, unclip-h and unclip-l
2023-03-28 19:39:20 +03:00
AUTOMATIC1111
e49c479819
Merge pull request #9031 from AUTOMATIC1111/serve-css-as-files
serve css as independent files
2023-03-28 19:38:43 +03:00
space-nuko
4414d36bf6
Merge branch 'master' into img2img-enhance 2023-03-28 10:59:12 -04:00
missionfloyd
b3e593edcb
Fix quicksettings alignment 2023-03-27 16:38:42 -06:00
space-nuko
56f62d3851 Skip extension installers if all disabled 2023-03-27 17:23:20 -04:00
zetclansu
4b49020506
Update style.css
Fix for wide width image in img2img_sketch, img2maskimg, inpaint_sketch
2023-03-27 21:05:56 +03:00
space-nuko
fc8e1008ea Make disable configurable between builtin/extra extensions 2023-03-27 12:44:49 -04:00
space-nuko
2a4d3d2124 Add temporary "disable all extensions" option for debugging use 2023-03-27 12:04:45 -04:00
AUTOMATIC
77f9db3b08 serve css as independent files 2023-03-27 12:59:12 +03:00
AUTOMATIC1111
955df7751e
Merge pull request #9021 from pieresimakp/pr-dev
Fix the style box dropdown not wrapping in txt2img and img2img
2023-03-27 10:28:13 +03:00
AUTOMATIC
9e82896d5f remove an extra unneeded row in outputs 2023-03-27 10:20:01 +03:00
AUTOMATIC
5fcd4bfa3d do not read extensions' git stuff at startup 2023-03-27 10:02:30 +03:00
AUTOMATIC
5cf3822e46 Revert "Merge pull request #8651 from vladmandic/flicker"
This reverts commit 8402682118, reversing
changes made to e8bbc344c3.
2023-03-27 08:18:28 +03:00
AUTOMATIC1111
c7daba71de
Merge pull request #8669 from Vespinian/fix-api-running-unwanted_scripts
Fix for API running unwanted alwayson scripts
2023-03-27 08:11:34 +03:00
AUTOMATIC1111
769def1e41
Merge pull request #8944 from SirFrags/extra_save_geninfo
Transfer generation parameters to previews
2023-03-27 08:06:54 +03:00
AUTOMATIC
a70ae917ea update button pressed down style for #8569 2023-03-27 08:05:55 +03:00
AUTOMATIC1111
a7d6fc3b42
Merge pull request #8569 from missionfloyd/extra-networks-toggle
Make extra networks button togglable
2023-03-27 07:35:39 +03:00
AUTOMATIC1111
c5e1efb4ca
Merge pull request #8525 from bluelovers/pr/sort-001
feat: try sort as ignore-case
2023-03-27 07:34:18 +03:00
AUTOMATIC
b40538a7fe reformat css from latest commits, change color for dropdown selection to more neutral one 2023-03-27 07:30:38 +03:00
AUTOMATIC1111
8a454dab33
Merge pull request #8959 from missionfloyd/dropdown-width
Fix dropdown width
2023-03-27 07:16:37 +03:00
AUTOMATIC
3d09b4e99f remove -y, bring back -f 2023-03-27 07:12:40 +03:00
AUTOMATIC
ff0d97c1e3 bring back -y 2023-03-27 07:11:39 +03:00
AUTOMATIC1111
e8f34e3b41
Merge pull request #8938 from space-nuko/fix-image-ar-overlay
Fix img2img aspect ratio overlay in Gradio 3.23.0
2023-03-27 07:06:24 +03:00
AUTOMATIC1111
c19036d3fe
Merge pull request #8615 from gmikhail/readme-improvement
Minor README improvement
2023-03-27 06:56:38 +03:00
pieresimakp
68a5604cac Merge remote-tracking branch 'upstream/master' into pr-dev 2023-03-27 11:53:15 +08:00
AUTOMATIC1111
3b5a3fab91
Merge pull request #8943 from space-nuko/fix-accordion-padding
Fix padding on accordion/dropdown list elements
2023-03-27 06:45:39 +03:00
AUTOMATIC1111
a89af2325d
Merge pull request #8940 from space-nuko/fix-send-to-img2img
Fix Send to img2img buttons
2023-03-27 06:41:19 +03:00
AUTOMATIC1111
a336c7fe23
Merge pull request #9017 from camenduru/dev
convert to python v3.9
2023-03-27 06:38:17 +03:00
pieresimakp
774c691df8 fixed style box wrapping 2023-03-27 11:31:56 +08:00
camenduru
6a147db128
convert to python v3.9 2023-03-27 04:40:31 +03:00
camenduru
9d7390d2d1
convert to python v3.9 2023-03-27 04:28:40 +03:00
MrCheeze
1f08600345 overwrite xformers in the unclip model config if not available 2023-03-26 16:55:29 -04:00
AUTOMATIC
4c1ad743e3 for img2img, use None as upscaler instead of erroring out if the desired upscaler is not found 2023-03-26 11:01:32 +03:00
AUTOMATIC
532ac22b38 Merge branch 'lora_inplace' 2023-03-26 10:44:37 +03:00
AUTOMATIC
650ddc9dd3 Lora support for SD2 2023-03-26 10:44:20 +03:00
AUTOMATIC
b705c9b72b Merge branch 'lora_sd2' into lora_inplace 2023-03-26 07:04:43 +03:00
AUTOMATIC1111
64da5c46ef
Merge pull request #8931 from LipeCarmel/patch-1
loopback.py Colab compatibility and bug fix
2023-03-26 06:48:29 +03:00
missionfloyd
d286df0a71
Fix dropdown width 2023-03-25 21:00:02 -06:00
MrCheeze
8a34671fe9 Add support for the Variations models (unclip-h and unclip-l) 2023-03-25 21:03:07 -04:00
bluelovers
d64ff4248b remove changes in textual_inversion.py 2023-03-26 06:15:09 +08:00
missionfloyd
254ad09ef3
Update style.css 2023-03-25 15:01:10 -06:00
missionfloyd
6f18c9b13f
Merge branch 'master' into extra-networks-toggle 2023-03-25 14:51:25 -06:00
AUTOMATIC
80b26d2a69 apply Lora by altering layer's weights instead of adding more calculations in forward() 2023-03-25 23:06:33 +03:00
SirFrags
abc4d3a693
preview replace save params 2023-03-25 15:59:50 -04:00
space-nuko
d3b188c82d Fix padding on accordion/dropdown list elements 2023-03-25 15:52:06 -04:00
space-nuko
5eb7ff7768 Fix Send to img2img buttons 2023-03-25 14:52:47 -04:00
AUTOMATIC1111
a0d07fb580
Merge pull request #8936 from space-nuko/fix-notifications
Fix notifications not triggering
2023-03-25 21:45:40 +03:00
space-nuko
945f6e5e99 Fix img2img aspect ratio overlay in Gradio 3.23.0 2023-03-25 14:44:41 -04:00
space-nuko
9377092a89 Fix notifications not triggering 2023-03-25 14:28:20 -04:00
space-nuko
c5f9f7c237 Use .success() callback on img2img preview inputs change 2023-03-25 14:26:36 -04:00
Vespinian
23f6dfce4c Reworked this PR: now we have two default arg lists (one for each tab) that will be initialized on the first API request and then reused afterwards. init_script_args copies the corresponding default list and applies the modifications requested by the API request. 2023-03-25 14:16:35 -04:00
Vespinian
f371579571 Revert "Made copies of global scriptrunners, now we clear the copied scriptrunner of alwayson_scripts and only add back the ones that were requested"
This reverts commit dfa258de5f.
2023-03-25 14:11:46 -04:00
Vespinian
db602b100e Revert "Comment fix"
This reverts commit f04bd037a5.
2023-03-25 14:11:38 -04:00
LipeCarmel
5c7ab90a4e
loopback.py Colab compatibility and bug fix
This code (suggested by @abvgdeabvgde2) literally does the same thing and does not break with Python 3.9, making it helpful for Google Colab users (me included).
fixes #8927
Also a partial fix for #8902 but it does not resolve the unresponsive UI problem faced by @Archon332
2023-03-25 14:48:22 -03:00
space-nuko
75e7eb9172 img2img resolution preview should use currently selected tab's image 2023-03-25 12:53:03 -04:00
space-nuko
7ea5d395c4 Add upscaler to img2img 2023-03-25 12:52:43 -04:00
space-nuko
68999d0b15 Add upscale slider to img2img 2023-03-25 12:52:35 -04:00
AUTOMATIC1111
91ae48fd7e
Merge pull request #8921 from remixer-dec/fix-callstack-loop
fix overriding getElementById on document
2023-03-25 17:41:42 +03:00
AUTOMATIC
69eb2a9ee8 add missing extensions_dir, extensions_builtin_dir to extensions.py 2023-03-25 17:39:53 +03:00
Remixer Dec
22bfcf135f fix overriding getElementById on document 2023-03-25 18:23:34 +04:00
AUTOMATIC1111
b2c428a095
Merge pull request #8839 from pieresimakp/master
Add search textbox to filter available extensions by name/description
2023-03-25 16:15:42 +03:00
AUTOMATIC1111
9b2dcb04bc
Merge branch 'master' into master 2023-03-25 16:15:29 +03:00
AUTOMATIC
68953a4705 specify the tests dir in automated tests 2023-03-25 16:12:50 +03:00
AUTOMATIC
8c801362b4 split commandline args into its own file
make launch.py use the same command line argument parser as the main program
2023-03-25 16:05:25 +03:00
Andrey
3ec7e19f2b Split history: mv temp modules/shared.py 2023-03-25 14:30:50 +03:00
Andrey
932dbfe7d9 Split history: merge 2023-03-25 14:30:50 +03:00
Andrey
0450d90605 Split history: mv modules/shared.py temp 2023-03-25 14:30:50 +03:00
Andrey
a5cef4932f Split history: mv modules/shared.py modules/cmd_args.py 2023-03-25 14:30:49 +03:00
AUTOMATIC1111
aba5d639fb
Merge pull request #8547 from vladmandic/image-size
disable pil checks
2023-03-25 14:04:46 +03:00
AUTOMATIC1111
d9b9bf78b3
Merge pull request #8508 from vladmandic/lightning
allow usage of latest pytorch_lightning
2023-03-25 14:03:59 +03:00
AUTOMATIC1111
009bc9f534
Merge pull request #8698 from Narsil/update_safetensors
Updating safetensors version (fully backward compatible)
2023-03-25 13:06:05 +03:00
AUTOMATIC1111
6b00d876f7
Merge pull request #7936 from EllangoK/master
Custom height and width settings for Extra Networks cards
2023-03-25 13:01:49 +03:00
AUTOMATIC1111
a03536f213
Merge pull request #8662 from vladmandic/api-handler
api error handler
2023-03-25 12:47:32 +03:00
EllangoK
38335e1b8f readds metadata_button 2023-03-25 05:47:06 -04:00
AUTOMATIC
616cc13b6b update overflow-wrap: break-word; for gradio 3.23 2023-03-25 12:43:14 +03:00
AUTOMATIC1111
bb851e84c1
Merge pull request #8723 from whwlsfb/patch-1
fix output-html text overflow.
2023-03-25 12:36:43 +03:00
AUTOMATIC1111
db7caf9b9c
Merge branch 'master' into patch-1 2023-03-25 12:36:35 +03:00
AUTOMATIC
c1294d849a make it possible for user to enable gradio analytics by setting GRADIO_ANALYTICS_ENABLED=True 2023-03-25 12:21:18 +03:00
AUTOMATIC1111
501f40d834
Merge pull request #8658 from hananbeer/disable_gradio_analytics
preserve privacy by disabling gradio analytics globally
2023-03-25 12:19:40 +03:00
AUTOMATIC1111
983d48a921
Merge pull request #8772 from mcmonkey4eva/img2img-alt-sd2-fix
Fix img2img-alternative-test script for SD v2.x
2023-03-25 12:16:09 +03:00
Karun
63a2f8d822
Merge branch 'master' into master 2023-03-25 05:12:55 -04:00
AUTOMATIC
b7c14ed041 final part of merging #8749 2023-03-25 12:10:03 +03:00
AUTOMATIC1111
e442b73633
Merge pull request #8749 from missionfloyd/extra-network-info
Don't bubble when metadata_button is clicked
2023-03-25 12:09:05 +03:00
AUTOMATIC1111
8dbe793af5
Merge branch 'master' into extra-network-info 2023-03-25 12:08:24 +03:00
AUTOMATIC1111
70615448b2
Merge pull request #8717 from nonnonstop/fix-installpy
Fix problem of install.py when data-dir is specified
2023-03-25 12:05:44 +03:00
AUTOMATIC1111
956ed9a737
Merge pull request #8780 from Brawlence/master
Unload and re-load checkpoint to VRAM on request (API & Manual)
2023-03-25 12:03:26 +03:00
AUTOMATIC1111
8d2c582e3e
Merge pull request #8797 from ArrowM/master
Move `load_file_from_url` import
2023-03-25 11:34:07 +03:00
AUTOMATIC1111
c0a7ff8055
Merge pull request #8803 from mlhub-action/fix_scripts_load_order
Fix scripts load order
2023-03-25 11:23:41 +03:00
AUTOMATIC1111
90410e212f
Merge pull request #8814 from catboxanon/inpaint-mask
Add ability to display and/or save inpainting mask and masked composite
2023-03-25 11:21:45 +03:00
AUTOMATIC1111
8402682118
Merge pull request #8651 from vladmandic/flicker
disable gradio css transitions
2023-03-25 11:05:50 +03:00
AUTOMATIC1111
e8bbc344c3
Merge pull request #8824 from zimkjh/fix-variable-typo
fix variable typo
2023-03-25 11:04:46 +03:00
AUTOMATIC1111
9d2551d593
Merge pull request #8647 from Tps-F/add_submodule
Support git submodule for extension and Fix Windows PermissionError
2023-03-25 11:02:41 +03:00
AUTOMATIC1111
442f710d94
Merge pull request #8799 from JaRail/master
Loopback Script Updates
2023-03-25 10:41:24 +03:00
AUTOMATIC1111
2664198584
Merge pull request #8801 from EllangoK/xyz-values
Fixes xyz extra_generation_params not being saved (previously worked)
2023-03-25 10:39:19 +03:00
AUTOMATIC1111
275834ca97
Merge pull request #8731 from jokker87/master
fixed typo in prompt-bracket-checker.js which leads to js error
2023-03-25 10:29:27 +03:00
AUTOMATIC
9ed04e759d use HTTP request to fetch metadata for Lora cards instead of including it into the main page 2023-03-25 10:11:04 +03:00
AUTOMATIC1111
724a63714d
Merge pull request #8878 from butaixianran/master
Fix None type error for TI module
2023-03-25 09:20:43 +03:00
AUTOMATIC1111
89ea746f7c
Merge pull request #8866 from brkirch/mps-torch-2-0-nn-linear-workarounds
Add PyTorch 2.0 support for macOS, fix image generation on macOS 13.2.X
2023-03-25 09:17:09 +03:00
AUTOMATIC1111
03c8eefbcc
Merge pull request #8782 from FNSpd/master
--upcast-sampling support for CUDA
2023-03-25 09:10:01 +03:00
AUTOMATIC
b0b777e64d Merge branch 'gradio-3-22' 2023-03-25 09:00:51 +03:00
AUTOMATIC
58c3144d2b fix generate forever and other context menus 2023-03-25 09:00:38 +03:00
AUTOMATIC
133fd7bea5 restore interrupt/stop button styling 2023-03-25 09:00:38 +03:00
AUTOMATIC
889f5e38a1 fix clipping in lightbox image viewer 2023-03-25 09:00:38 +03:00
AUTOMATIC
4697def235 bump gradio to 3.23
fix broken image dragging
2023-03-25 09:00:37 +03:00
AUTOMATIC
ff216820fd fix extra networks ui 2023-03-25 09:00:37 +03:00
AUTOMATIC
af2db25c84 enable queue by default
more stylistic changes
2023-03-25 09:00:37 +03:00
AUTOMATIC
43a0912a07 hide delete button for single-item dropdown
more stylistic changes
2023-03-25 09:00:37 +03:00
AUTOMATIC
9b2f205400 fix ctrl+up/down attention edit
fix dropdown obscured by live preview
stylistic changes
2023-03-25 09:00:36 +03:00
AUTOMATIC
9f0da9f6ed initial gradio 3.22 support 2023-03-25 09:00:36 +03:00
AUTOMATIC
23d68bfc9a fix generate forever and other context menus 2023-03-25 08:54:01 +03:00
AUTOMATIC
82905f520c restore interrupt/stop button styling 2023-03-25 08:48:34 +03:00
AUTOMATIC
1bfa1be6dd fix clipping in lightbox image viewer 2023-03-25 08:28:21 +03:00
AUTOMATIC
9e1afa9eb4 bump gradio to 3.23
fix broken image dragging
2023-03-25 07:29:51 +03:00
FNSpd
a9eab236d7
Update devices.py 2023-03-24 23:08:30 +04:00
butaixianran
803d44c474
Fix None type error for TI module
When a user uses model_name.png as a preview image, textual_inversion.py still treats it as an embedding and doesn't handle the error, it just lets Python throw a NoneType error like the following:
```bash
  File "D:\Work\Dev\AI\stable-diffusion-webui\modules\textual_inversion\textual_inversion.py", line 155, in load_from_file
    name = data.get('name', name)
AttributeError: 'NoneType' object has no attribute 'get'
```

With just a simple `if data:` check as follows, there is no error, nothing breaks, and this module now works fine with users' preview images.
Old code:  
```python
                data = extract_image_data_embed(embed_image)
                name = data.get('name', name)
```
New code:  
```python
                data = extract_image_data_embed(embed_image)
                if data:
                    name = data.get('name', name)
                else:
                    # if data is None, this is not an embedding, just a preview image
                    return
```

Also, since there are no more errors in the textual inversion module, extra networks can now set "model_name.png" as the preview image for embeddings.
2023-03-25 02:05:00 +08:00
FNSpd
280ed8f00f
Update sd_hijack_optimizations.py 2023-03-24 16:29:16 +04:00
FNSpd
beb7dda5d6
Update sd_hijack_unet.py 2023-03-24 16:25:42 +04:00
brkirch
27fe3eb6a9 Add workaround for MPS layer_norm on PyTorch 2.0
On PyTorch 2.0 with MPS, layer_norm only accepts float32 inputs. This was fixed shortly after 2.0 was finalized, so the workaround can be applied with an exact version match.
2023-03-24 04:04:22 -04:00
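A minimal sketch of the kind of version-gated workaround described above, assuming the fix is to upcast float16 inputs to float32 around layer_norm on MPS; the wrapper name and exact version gate are illustrative, not the repository's actual code:

```python
import torch

_orig_layer_norm = torch.nn.functional.layer_norm

def patched_layer_norm(input, normalized_shape, weight=None, bias=None, eps=1e-5):
    # On PyTorch 2.0.0 + MPS, layer_norm only accepts float32 inputs,
    # so upcast float16 inputs and cast the result back.
    if input.device.type == "mps" and input.dtype == torch.float16:
        return _orig_layer_norm(
            input.float(), normalized_shape,
            weight.float() if weight is not None else None,
            bias.float() if bias is not None else None,
            eps,
        ).half()
    return _orig_layer_norm(input, normalized_shape, weight, bias, eps)

# Apply only when the exact affected version is running (illustrative gate).
if torch.__version__ == "2.0.0" and torch.backends.mps.is_available():
    torch.nn.functional.layer_norm = patched_layer_norm
```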
brkirch
c5142e2fbe Add workaround for broken nn.Linear on macOS 13.2
Credit to danieldk (https://github.com/explosion/curated-transformers/pull/124) for the workaround this is based on.
2023-03-24 04:04:20 -04:00
pieresimakp
252f15e046 added search textbox to filter available extensions 2023-03-23 23:43:00 +08:00
James Railton
a9eef1fbb1 Fix "masked content" in loopback script
The loopback script did not set masked content to original after the first loop, so each loop would apply a fill or latent mask. This would essentially reset progress on each loop.

The desired behavior is to use the mask for the first loop, then continue to iterate on the results of the previous loop.
2023-03-23 10:44:25 -04:00
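A rough sketch of the described fix, under the assumption that the web UI exposes masked content via a field like `p.inpainting_fill` with 1 meaning "original"; all names here are illustrative, not the script's actual code:

```python
def run_loopback(p, loops, process_images):
    """Sketch: use the chosen masked-content mode only on the first pass,
    then iterate on the previous loop's result (assumed field names)."""
    processed = None
    for i in range(loops):
        processed = process_images(p)            # one img2img pass
        p.init_images = [processed.images[0]]    # next loop starts from this result
        if i == 0:
            p.inpainting_fill = 1                # stop re-applying fill/latent mask
    return processed
```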
carat-johyun
92e173d414 fix variable typo 2023-03-23 14:28:08 +09:00
catboxanon
caf84e8233 Expose inpainting mask and composite
For inpainting, this exposes the mask and masked composite and gives
the user the ability to display these in the web UI,
save to disk, or both.
2023-03-22 17:51:40 +00:00
sumof2primes
cd3cd0fca0 Fix scripts load order
- 1st webui, 2nd extensions-builtin, 3rd extensions
 - to load scripts independent of --data-dir
 - change load order key [x.basedir, x.filename, x.path] to [orderby(x.basedir), x.filename, x.path]

e.g., extensions that depend on scripts/xyz_grid.py should be loaded later:
extensions\sd-webui-controlnet\scripts\xyz_grid_support.py
extensions\sd-webui-additional-networks\scripts\xyz_grid_support.py
2023-03-23 01:28:09 +09:00
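A hedged sketch of what such a basedir-aware sort key could look like; the `script_sort_key` helper and the dict-based inputs are illustrative, not the repository's actual data structures:

```python
import os

def script_sort_key(script, paths):
    """Illustrative sort key: rank by where the script lives (webui core first,
    then extensions-builtin, then extensions), and only then by filename/path,
    so ordering no longer depends on where --data-dir points."""
    order = {paths["webui"]: 0, paths["extensions_builtin"]: 1, paths["extensions"]: 2}
    base = os.path.normpath(script["basedir"])
    rank = next((v for k, v in order.items() if base.startswith(os.path.normpath(k))), 3)
    return (rank, script["filename"], script["path"])

scripts = [
    {"basedir": "extensions/sd-webui-controlnet", "filename": "xyz_grid_support.py",
     "path": "extensions/sd-webui-controlnet/scripts/xyz_grid_support.py"},
    {"basedir": ".", "filename": "xyz_grid.py", "path": "scripts/xyz_grid.py"},
]
paths = {"webui": ".", "extensions_builtin": "extensions-builtin", "extensions": "extensions"}
print(sorted(scripts, key=lambda s: script_sort_key(s, paths)))  # core script sorts first
```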
sumof2primes
64b7e83823 Fix scripts load order
- 1st webui, 2nd extensions-builtin, 3rd extensions
 - to load scripts independent of --data-dir
 - change load order key [x.basedir, x.filename, x.path] to [orderby(x.basedir), x.filename, x.path]

e.g., extensions that depend on scripts/xyz_grid.py should be loaded later:
extensions\sd-webui-controlnet\scripts\xyz_grid_support.py
extensions\sd-webui-additional-networks\scripts\xyz_grid_support.py
2023-03-22 18:24:11 +09:00
EllangoK
e7ac09b25a fixes xyz extra_generation_params not being saved 2023-03-22 02:11:38 -04:00
ArrowM
00bd271faf Move load_file_from_url
Why?
One of the internal calls of `load_file_from_url` imports cv2, which locks the cv2 site-package; this can break (and in our case, was breaking) the installation of some libraries by extensions. The base project should limit its imports of unnecessary libraries during the installation phase when possible.
2023-03-21 21:13:30 -05:00
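A hedged sketch of the deferred-import idea, assuming `load_file_from_url` comes from `basicsr.utils.download_util` as used in the web UI's model loader; the wrapper function is illustrative:

```python
def download_model(url, model_dir):
    # Import inside the function so that merely importing this module (e.g. during
    # an extension's install step) does not transitively pull in cv2.
    from basicsr.utils.download_util import load_file_from_url  # deferred import
    return load_file_from_url(url, model_dir=model_dir)
```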
James Railton
33b8539147 Loopback Script Updates
- Improved user experience. You can now pick the denoising strength of the final loop and one of three curves. Previously you picked a multiplier such as 0.98 or 1.03 to define the change to the denoising strength for each loop. You had to do a ton of math in your head to visualize what was happening. The new UX makes it very easy to understand what's going on and tweak.
- For batch sizes over 1, intermediate images are no longer returned. For a batch size of 1, intermediate images from each loop will continue to be returned. When more than 1 image is returned, a grid will also be generated. Previously, for larger jobs, you'd get back a mess of many grids and potentially hundreds of images with no organization. To make large jobs usable, only final images are returned.
- Added support for skipping current image. Fixed interrupt to cleanly end and return images. Previously these would throw.
- Improved tooltip descriptions
- Fix some edge cases
2023-03-21 21:07:33 -04:00
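A hedged sketch of interpolating a per-loop denoising strength from the initial value to a user-chosen final value along one of three curves; the curve names and formulas here are illustrative, not the script's exact math:

```python
def loop_denoise_strengths(initial, final, loops, curve="Linear"):
    """Illustrative interpolation toward the chosen final denoising strength;
    "Aggressive"/"Lazy" bend the curve toward early/late change."""
    strengths = []
    for i in range(loops):
        t = i / max(loops - 1, 1)          # 0.0 on the first loop, 1.0 on the last
        if curve == "Aggressive":
            t = t ** 0.5                   # change more at the start
        elif curve == "Lazy":
            t = t ** 2                     # change more at the end
        strengths.append(initial + (final - initial) * t)
    return strengths

print(loop_denoise_strengths(0.75, 0.35, loops=5, curve="Aggressive"))
```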
FNSpd
2f0181405f
Update lora.py 2023-03-21 14:53:51 +04:00
FNSpd
c84c9df737
Update sd_hijack_optimizations.py 2023-03-21 14:50:22 +04:00
FNSpd
91cfa9718c
Update sd_hijack_unet.py 2023-03-21 14:47:43 +04:00
FNSpd
254d994643
Update devices.py 2023-03-21 14:45:39 +04:00
Φφ
4cbbb881ee Unload checkpoints on Request
…to free VRAM.

New Action buttons in the settings to manually free and reload checkpoints, essentially
juggling models between RAM and VRAM.
2023-03-21 09:28:50 +03:00
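A minimal sketch of the RAM/VRAM juggling idea (move the loaded checkpoint's weights to the CPU to free VRAM, then back to the GPU on request); the function names are illustrative, not the web UI's actual API:

```python
import torch

def unload_model_to_ram(model):
    """Free VRAM by moving the checkpoint's weights to system RAM (illustrative)."""
    model.to("cpu")
    torch.cuda.empty_cache()

def reload_model_to_vram(model, device="cuda"):
    """Move the weights back to the GPU when generation is requested again."""
    model.to(device)
```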
AUTOMATIC
d3dcb05904 fix extra networks ui 2023-03-21 09:24:19 +03:00
AUTOMATIC
6eacaad4a9 enable queue by default
more stylistic changes
2023-03-21 08:49:08 +03:00
AUTOMATIC
f93547be18 hide delete button for single-item dropdown
more stylistic changes
2023-03-21 08:18:14 +03:00
AUTOMATIC
46482decd5 fix ctrl+up/down attention edit
fix dropdown obscured by live preview
stylistic changes
2023-03-21 06:49:19 +03:00
missionfloyd
8e3ced73a8 Add event.stopPropagation() to extraNetworksShowMetadata()
Prevent bubbling the same way "replace preview" does
2023-03-20 18:04:22 -06:00
Alex "mcmonkey" Goodwin
c9c692c4d9 cleanup the img2img alt file a bit 2023-03-20 15:43:01 -07:00
Alex "mcmonkey" Goodwin
05ec128ca9 fix img2img alt for SD v2.x 2023-03-20 15:42:36 -07:00
AUTOMATIC
8ea8e712c4 initial gradio 3.22 support 2023-03-20 16:09:36 +03:00
missionfloyd
64fc936738
Don't bubble when metadata_button is clicked 2023-03-19 19:30:28 -06:00
Michael Bachmann
cf17dfcd64 fixed typo in prompt-bracket-checker.js which leads to js error 2023-03-19 14:50:44 +01:00
whw1sfb
e5dd5d7335
fix output-html text overflow. 2023-03-19 14:05:01 +08:00
nonnonstop
b9a66b02d0
Fix problem of install.py when data-dir is specified 2023-03-19 01:17:04 +09:00
Nicolas Patry
4f415ad639 Updating safetensors version (fully backward compatible)
- Main takeaway is that the newly created files should load better
  because pointer alignment is forced
2023-03-17 09:02:36 +01:00
Ftps
6f5a5ad205
Delete settings.json 2023-03-16 12:36:11 +09:00
Ftps
147d2922ff Cross device link 2023-03-16 12:35:48 +09:00
Vespinian
f04bd037a5 Comment fix 2023-03-15 22:27:54 -04:00
Vespinian
dfa258de5f Made copies of global scriptrunners, now we clear the copied scriptrunner of alwayson_scripts and only add back the ones that were requested 2023-03-15 22:17:32 -04:00
missionfloyd
575c17a8f9
Update tooltip per Kilvoctu's suggestion 2023-03-15 16:56:27 -06:00
Vladimir Mandic
5387576c59
api error handler 2023-03-15 15:11:04 -04:00
high_byte
79d261b7d4 disable gradio analytics globally 2023-03-15 19:44:30 +02:00
Vladimir Mandic
250193ee93
disable gradio css transitions 2023-03-15 10:14:40 -04:00
Ftps
79ed567b12 remove unused library
I'm sorry I forgot.
2023-03-15 22:42:53 +09:00
Ftps
4845db4e32 Update ui_extensions.py
Add git submodule and Fix WinError
2023-03-15 20:29:50 +09:00
bluelovers
fd672a79af fix: remove cmp
by ChatGPT
2023-03-15 13:17:09 +08:00
Vladimir Mandic
f2ed6295b9
make it module specific 2023-03-14 07:46:09 -04:00
Mikhail Gribanov
1823526c10 Update README.md 2023-03-14 13:05:45 +02:00
AUTOMATIC1111
a9fed7c364
Merge pull request #8503 from mcmonkey4eva/filename-length-limit-fix
Add correction for filename length limits on *nix systems
2023-03-14 11:28:13 +03:00
AUTOMATIC
6a04a7f20f fix an error loading Lora with empty values in metadata 2023-03-14 11:22:29 +03:00
AUTOMATIC1111
8b35b64e11
Merge pull request #8589 from vladmandic/unipc
add progressbar to unipc sampler
2023-03-14 11:19:21 +03:00
AUTOMATIC1111
f9b0465c8b
Merge pull request #8588 from hananbeer/fix/undefined_extra_network_data
initialize extra_network_data before use
2023-03-14 11:18:30 +03:00
AUTOMATIC1111
58c4777cc0
Merge pull request #8607 from willtakasan/patch-2
Update ui_extra_networks.py
2023-03-14 11:12:31 +03:00
willtakasan
4281432594
Update ui_extra_networks.py
I updated it so that no error message is displayed when setting a webp for the preview image.
2023-03-14 15:36:08 +09:00
AUTOMATIC
c19530f1a5 Add view metadata button for Lora cards. 2023-03-14 09:10:26 +03:00
Vladimir Mandic
03a80f198e
add pbar to unipc 2023-03-13 12:35:30 -04:00
high_byte
4d26c7da57 initialize extra_network_data before use 2023-03-13 17:37:29 +02:00
missionfloyd
9e23bacfbc Make extra networks button togglable 2023-03-12 17:07:03 -06:00
Alex "mcmonkey" Goodwin
af9158a8c7 update fullfn properly 2023-03-12 12:36:04 -07:00
Alex "mcmonkey" Goodwin
48df6d66ea add safety check in case of short extensions
so, e.g., if a two-letter or empty extension is used, `.txt` would break; this `max` call protects against that.
2023-03-12 12:33:29 -07:00
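A small sketch of how a `max()` call can keep filename truncation safe when the extension is very short or empty; the helper and the 255-character limit are illustrative:

```python
import os

def truncate_filename(fullfn, max_length=255):
    """Illustrative: truncate the stem, never the extension; max() ensures at least
    one character of the stem is kept even for short or empty extensions."""
    dirname, basename = os.path.split(fullfn)
    stem, ext = os.path.splitext(basename)
    keep = max(max_length - len(ext), 1)   # the protective max() call
    return os.path.join(dirname, stem[:keep] + ext)

print(truncate_filename("out/" + "x" * 300 + ".png"))
print(truncate_filename("out/" + "x" * 300))  # empty extension still works
```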
Alex "mcmonkey" Goodwin
a71b7b5ec0 relocate filename length limit to better spot 2023-03-12 12:30:31 -07:00
AUTOMATIC
dfeee786f9 display correct timings after restarting UI 2023-03-12 21:25:22 +03:00
AUTOMATIC
a00cd8b9c1 attempt to fix memory monitor with multiple CUDA devices 2023-03-12 21:04:17 +03:00
AUTOMATIC
6033de18bf revert webui.sh from #8492 2023-03-12 20:50:02 +03:00
AUTOMATIC
27eedb6966 change extension index link to the new dedicated repo instead of wiki 2023-03-12 17:20:17 +03:00
AUTOMATIC1111
806aa5e8e7
Merge pull request #8548 from vladmandic/total-tqdm
force refresh tqdm before close
2023-03-12 16:51:30 +03:00
Vladimir Mandic
bd67c41f54
force refresh tqdm before close 2023-03-12 09:19:23 -04:00
Vladimir Mandic
8179901f9c
disable pil checks 2023-03-12 09:12:54 -04:00
Vladimir Mandic
fc4d593b4e
fix import 2023-03-12 08:51:12 -04:00
AUTOMATIC
3c922d983b fix #8492 breaking the program when the directory with code contains spaces. 2023-03-12 12:11:51 +03:00
AUTOMATIC
5c9f2bbb74 do not import modules.paths in launch.py 2023-03-12 08:58:58 +03:00
AUTOMATIC1111
adf723a9b2
Merge pull request #8492 from zhanghua000/absolute-path
fix: gradio's ValueError about fetching extensions files
2023-03-12 08:55:15 +03:00
AUTOMATIC1111
ddc503d14c
Merge pull request #8509 from vladmandic/fastapi
allow usage of latest fastapi
2023-03-12 08:26:57 +03:00
AUTOMATIC1111
6106f6d0a0
Merge pull request #7965 from Kilvoctu/extranet-buttons
Use emojis for extra network buttons
2023-03-12 08:25:03 +03:00
AUTOMATIC1111
beb96bd115
Merge pull request #8515 from EllangoK/unipc-typo
Fix dims typo in unipc
2023-03-12 08:24:17 +03:00
AUTOMATIC1111
bbc4b0478a
Merge pull request #8518 from brkirch/remove-bool-test
Fix image generation on macOS 13.3 betas
2023-03-12 08:14:26 +03:00
AUTOMATIC1111
55ccc8fe6f
Merge pull request #8523 from hananbeer/feature/xyz_face_restore
add face restoration option to xyz_grid
2023-03-12 08:12:47 +03:00
AUTOMATIC1111
3e2ac603e9
Merge pull request #8156 from bluelovers/pr/lightbox-001
feat: better lightbox when not enable zoom
2023-03-12 08:06:27 +03:00
AUTOMATIC1111
ab0b0e1e76
Merge pull request #8187 from Vespinian/master
Add a way for API txt2img and img2img requests to pass args to always on scripts
2023-03-12 08:00:55 +03:00
bluelovers
db85421da1 feat: better lightbox when not enable zoom 2023-03-12 10:25:42 +08:00
bluelovers
0492424121 feat: try sort as ignore-case
https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/8368
2023-03-12 10:18:33 +08:00
high_byte
5ed5e95fb8 add face restoration option to xyz_grid 2023-03-12 03:29:07 +02:00
brkirch
a4cb96d4ae Remove test, use bool tensor fix by default
The test isn't working correctly on macOS 13.3 and the bool tensor fix for cumsum is currently always needed anyway, so enable the fix by default.
2023-03-11 17:35:17 -05:00
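A hedged sketch of the bool-tensor workaround: cast bool inputs to int before `torch.cumsum` on MPS; the wrapper is illustrative, not the repository's actual hijack:

```python
import torch

_orig_cumsum = torch.cumsum

def cumsum_bool_fix(input, dim, **kwargs):
    # torch.cumsum on MPS has produced wrong results for bool tensors,
    # so cast them to int before accumulating (illustrative wrapper).
    if input.device.type == "mps" and input.dtype == torch.bool:
        input = input.to(torch.int)
    return _orig_cumsum(input, dim, **kwargs)

torch.cumsum = cumsum_bool_fix
```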
EllangoK
48f4abd2e6 fix dims typo in unipc 2023-03-11 15:52:14 -05:00
Vespinian
64efb3d9e0 Merge branch 'master' of https://github.com/AUTOMATIC1111/stable-diffusion-webui 2023-03-11 14:35:20 -05:00
Vespinian
49bbdbe447 small diff whitespace cleanup 2023-03-11 14:34:56 -05:00
Kilvoctu
247a34498b restore text, remove 'close'
don't use emojis for extra network buttons; remove 'close'
2023-03-11 13:11:26 -06:00
AUTOMATIC
27e319dc4f alternative solution for #8089 2023-03-11 21:22:52 +03:00
Vespinian
5546e71a10 Fixed whitespace 2023-03-11 12:35:20 -05:00
Vespinian
46f9fe3cd6 Merge branch 'master' of https://github.com/AUTOMATIC1111/stable-diffusion-webui 2023-03-11 12:33:35 -05:00
Vespinian
2174f58dae Changed alwayson_script_name and alwayson_script_args api params to 1 alwayson_scripts param dict 2023-03-11 12:21:33 -05:00
Vladimir Mandic
29ce0bf4f2
allow usage of latest fastapi 2023-03-11 12:01:08 -05:00
Vladimir Mandic
fb088bfb64
allow usage of newer pytorch_lightning 2023-03-11 11:13:21 -05:00
AUTOMATIC
94ffa9fc53 emergency fix for xyz plot 2023-03-11 18:55:48 +03:00
Alex "mcmonkey" Goodwin
7fd19fa4e7 initial fix for filename length limits on *nix systems 2023-03-11 07:22:22 -08:00
AUTOMATIC
5cea278d3a bump GitPython to 3.1.30 because some people would be upset about it being below that version #8118 2023-03-11 17:51:55 +03:00
AUTOMATIC1111
e0ca78509a
Merge pull request #8118 from adam-huganir/8116-gitpython-api-breaking-change
git 3.1.30 api change, issue #8116
2023-03-11 17:47:51 +03:00
Adam Huganir
1e1a32b130
Update requirements_versions.txt
revert back to .27
2023-03-11 09:34:17 -05:00
AUTOMATIC1111
d3dd6cc01c
Merge pull request #8175 from vladmandic/image_size
adds checks for resulting image size to avoid memory issues
2023-03-11 16:45:35 +03:00
AUTOMATIC1111
76bc72116e
Merge pull request #7818 from space-nuko/extension-paste-field-names
Allow extensions to declare paste fields for "Send to X" buttons
2023-03-11 16:44:20 +03:00
AUTOMATIC1111
ef9efb61aa
Merge pull request #7954 from EllangoK/xyz-newline
Fixes newlines within checkpoints being detected as its own entry
2023-03-11 16:37:51 +03:00
AUTOMATIC1111
b9fd9c81de
Merge pull request #7963 from xSinStarx/patch-1
Bug Fix: Fixes img2img Negative Token Counter
2023-03-11 16:34:34 +03:00
Vladimir Mandic
a47c18297e
use assert instead of return 2023-03-11 08:33:55 -05:00
AUTOMATIC
52dcf0f0c7 record startup time 2023-03-11 16:27:58 +03:00
AUTOMATIC1111
f968270fec
Merge pull request #7812 from vladmandic/hide_ui_tabs
allow configurable hiding of ui tabs
2023-03-11 16:00:43 +03:00
AUTOMATIC1111
4637116341
Merge pull request #8021 from 112292454/master
continue fix prompt_matrix.py when high-res
2023-03-11 15:59:13 +03:00
AUTOMATIC
6da2027213 save previews for extra networks in the selected format 2023-03-11 15:46:20 +03:00
AUTOMATIC
9320139bd8 support three extensions for preview instead of one: png, jpg, webp 2023-03-11 15:33:24 +03:00
AUTOMATIC
ce68ab8d0d remove underscores from function names in #8366
remove LRU from #8366 because I don't know why it's there
2023-03-11 15:27:42 +03:00
AUTOMATIC1111
c239b3d7a8
Merge pull request #8366 from akx/extra-net-descs
Extra network description files
2023-03-11 15:20:34 +03:00
AUTOMATIC1111
92bb54720f
Merge pull request #8031 from DrakeRichards/notification-fix
Bug fix: Added results selector for notifications
2023-03-11 15:19:03 +03:00
AUTOMATIC1111
e15c4f31e3 Merge pull request #8042 from fkunn1326/master
Add .mjs support for extensions
2023-03-11 15:18:05 +03:00
AUTOMATIC
f36ba9949a add credit for UniPC sampler into the readme 2023-03-11 15:02:58 +03:00
Zhang Hua
d25c4b13e4
test/basic_features/{extras,img2img}_test.py: use absolute path 2023-03-11 20:00:12 +08:00
Zhang Hua
9abe2f5e74
test/server_poll.py: use absolute path for test
test/server_poll.py: fix absolute path
2023-03-11 20:00:12 +08:00
Zhang Hua
8e0d16e746
modules/sd_vae_approx.py: fix VAE-approx path 2023-03-11 20:00:12 +08:00
Zhang Hua
8106117a47
models/ui.py: make the path of script.js absolute 2023-03-11 20:00:11 +08:00
Zhang Hua
1fa1ab5249
launch.py: fix failure because webui.sh's changes
launch.py: using getcwd() instead curdir

launch.py: use absolute path for preparing

also remove chdir()

launch.py: use absolute path for test

launch.py: add default script_path and data_path
2023-03-11 20:00:11 +08:00
Zhang Hua
d006108d75
webui.sh: remove all cd related code
This may be helpful for
https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/7028,
because we won't change working directory to the repo now, instead,
we will use any working directory. If we set working directory to
a path contains repo and the custom --data-dir, the problem in this
issue should be solved.

Howewer, this may be treated as an incompatible change if some code
assume the working directory is always the repo.

Also, there may be another solution that always let --data-dir be the
subdirectory of the repo, but personally I think this may not be what
we actually need.

As this issue mainly influent on Docker and I am not familiar with
.bat files, updating webui.bat is skipped.

webui.sh: source env from repo instead $PWD
2023-03-11 20:00:11 +08:00
AUTOMATIC1111
98695c1885
Merge pull request #8050 from Tpinion/master
Bugfix: Filter out temporary files that will be generated if the download fails.
2023-03-11 14:58:45 +03:00
AUTOMATIC
7f2005127f rename CFGDenoiserParams fields for #8064 2023-03-11 14:52:29 +03:00
AUTOMATIC1111
af416a2dbd
Merge pull request #8064 from laksjdjf/master
Add cond and uncond hidden states to CFGDenoiserParams
2023-03-11 14:48:55 +03:00
AUTOMATIC1111
2b0ef2c4e6
Merge pull request #8112 from vladmandic/progressbar
fix progressbar
2023-03-11 14:36:23 +03:00
AUTOMATIC1111
45905b92a9
Merge pull request #8100 from missionfloyd/webp-lossless
Add lossless webp option
2023-03-11 14:35:53 +03:00
AUTOMATIC1111
da3f942ab2
Merge pull request #8017 from space-nuko/before-process-batch
Add `before_process_batch` script callback
2023-03-11 14:33:38 +03:00
AUTOMATIC
aaa367e35c new setting: Extra text to add before <...> when adding extra network to prompt 2023-03-11 14:18:18 +03:00
AUTOMATIC1111
5fd1158b9e
Merge pull request #8496 from butaixianran/master
update "replace preview" link button's css
2023-03-11 14:10:25 +03:00
AUTOMATIC1111
9df72be90e
Merge pull request #8202 from Ju1-js/master
Gradio auth logic fix - Handle empty/newlines
2023-03-11 14:02:07 +03:00
AUTOMATIC1111
9860a6acda
Merge pull request #8326 from DejitaruJin/xyz-order-fix
Xyz order fix
2023-03-11 13:51:35 +03:00
AUTOMATIC1111
6705b1764a
Merge pull request #8092 from infinitewarp/sort-upscalers
sort upscalers by name
2023-03-11 13:44:41 +03:00
AUTOMATIC1111
8ec0442dcd
Merge pull request #8322 from yeataro/master
add: /sdapi/v1/scripts in API
2023-03-11 13:43:34 +03:00
butaixianran
946797b01d
update "replace preview" link button's css
Modify the CSS value of `.extra-network-thumbs .card:hover .additional a` from `block` to `inline-block`,
so extensions can add more buttons to extra networks' thumbnail cards.
2023-03-11 18:42:14 +08:00
AUTOMATIC
3531a50080 rename fields for API for saving/sending images
save images to correct directories
2023-03-11 13:22:59 +03:00
AUTOMATIC1111
bb3ecc3285
Merge pull request #8287 from vladmandic/save-images
Allow saving of images generated via API
2023-03-11 13:03:23 +03:00
AUTOMATIC1111
d81c503918
Merge pull request #8367 from pamparamm/scaled-dot-product-attention
Add scaled dot product attention
2023-03-11 12:24:03 +03:00
AUTOMATIC
1ace16e799 use path to git from env variable for git_pull_recursive 2023-03-11 12:21:53 +03:00
AUTOMATIC1111
f0a917c990
Merge pull request #8425 from vladlearns/master
feat: auto update all extensions using flag
2023-03-11 12:20:30 +03:00
AUTOMATIC
58b5b7c2f1 add UniPC options to infotext 2023-03-11 12:09:36 +03:00
AUTOMATIC
f261a4a53c use selected device instead of always cuda for UniPC sampler 2023-03-11 11:56:05 +03:00
AUTOMATIC1111
a11ce2b96c
Merge pull request #7710 from space-nuko/unipc
Implement UniPC sampler
2023-03-11 11:45:31 +03:00
space-nuko
5fef67f6ee Requested changes 2023-03-10 19:56:14 -05:00
space-nuko
ac38ad7e60 Merge remote-tracking branch 'origin/master' into unipc 2023-03-10 19:42:46 -05:00
Pam
8d7fa2f67c sdp_attnblock_forward hijack 2023-03-10 22:48:41 +05:00
Vladimir Mandic
1226028b9c
fix silly math error 2023-03-10 11:21:48 -05:00
Pam
0981dea948 sdp refactoring 2023-03-10 12:58:10 +05:00
Pam
37acba2633 argument to disable memory efficient for sdp 2023-03-10 12:19:36 +05:00
vladlearns
13081dd45e chore: added autostash flag to pull 2023-03-09 16:56:06 +02:00
vladlearns
b07b7057f0 chore: removed scripts and added a flag to launch.py 2023-03-09 16:29:07 +02:00
vladlearns
09c73710c9 chore: auto update all extensions using scripts 2023-03-08 23:00:55 +02:00
Yea Chen
f85a192f99
Update modules/api/api.py
Suggested change by @akx

Co-authored-by: Aarni Koskela <akx@iki.fi>
2023-03-07 04:04:35 +08:00
Pam
fec0a89511 scaled dot product attention 2023-03-07 00:33:13 +05:00
Aarni Koskela
06f167da37 Extra networks: support .txt description sidecar file 2023-03-06 21:15:33 +02:00
Aarni Koskela
49b1dc5e07 Deduplicate extra network preview-search code 2023-03-06 21:00:34 +02:00
Brad Smith
d118cb6ea3
use lowercase name for sorting; keep UpscalerLanczos and UpscalerNearest at the start of the list with UpscalerNone
Co-authored-by: catboxanon <122327233+catboxanon@users.noreply.github.com>
2023-03-06 13:21:42 -05:00
DejitaruJin
c8b52c7975
Short-circuit error handling 2023-03-04 19:32:09 -05:00
Vladimir Mandic
b012d70f15
update using original defaults 2023-03-04 17:51:37 -05:00
DejitaruJin
eb29ff211a
Add files via upload 2023-03-04 16:06:40 -05:00
DejitaruJin
fe7d7dfd5a
Add files via upload 2023-03-04 15:40:35 -05:00
DejitaruJin
2ba880704b
Add files via upload 2023-03-04 13:00:27 -05:00
DejitaruJin
2d9635cce5
Fix display and save order for X/Y/Z Grid script 2023-03-04 12:51:55 -05:00
Yea chen
c48bbccf12 add: /sdapi/v1/scripts in API
API for getting the scripts list
2023-03-04 11:46:07 +08:00
Vladimir Mandic
f8e219bad9
allow api requests to specify do not send images in response 2023-03-03 09:00:52 -05:00
Vladimir Mandic
23d4fb5bf2
allow saving of images via api 2023-03-03 08:29:10 -05:00
Ju1-js
fc3063d9b9
Remove unnecessary line 2023-03-01 18:25:23 -08:00
Adam Huganir
b14d8b61bd
version bump for git python due to CVE-2022-24439
required version for CVE-2022-24439 is >= 3.1.30
2023-03-01 13:07:37 -05:00
Ju1-js
7990ed92be
Slash was facing the wrong way 2023-02-28 22:05:47 -08:00
Ju1-js
1e30e4d9eb Gradio auth logic fix - Handle empty/newlines
When the massive one-liner was split into multiple lines, it lost the ability to handle newlines. This removes empty strings & newline characters from the logins. It also closes the file so it's more robust if the garbage collection function is ever changed.
2023-02-28 15:55:12 -08:00
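A minimal sketch of the described parsing (one `user:pass` entry per line, empty lines dropped, file closed via a context manager); the helper name is illustrative:

```python
def read_gradio_auth(path):
    """Illustrative: one "user:pass" entry per line; strip newlines and skip blanks."""
    with open(path, "r", encoding="utf8") as f:                   # file is closed automatically
        lines = [line.strip() for line in f]
    return [tuple(line.split(":", 1)) for line in lines if line]  # drop empty entries
```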
Vespinian
c6c2a59333 comment clarification 2023-02-27 23:45:59 -05:00
Vespinian
a39c4cf766 small refactor of api.py 2023-02-27 23:27:33 -05:00
Vespinian
3b6de96467 Added alwayson_script_name and alwayson_script_args to api
Added two additional possible entries in the API request: alwayson_script_name, a list of strings, and alwayson_script_args, a list of lists containing the args of each script. This allows us to send args to alwayson scripts and keep backwards compatibility with the old script_name and script_arg api params
2023-02-27 21:51:20 -05:00
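A hedged example of what a request using these entries could look like, per the commit's description; the port, script name, and argument values are placeholders, not real values:

```python
import requests  # hypothetical local call to a txt2img endpoint

payload = {
    "prompt": "a photo of a cat",
    "steps": 20,
    # per the commit: a list of script names plus a list of per-script arg lists
    "alwayson_script_name": ["some-alwayson-script"],       # placeholder name
    "alwayson_script_args": [[True, 0.5, "option-a"]],      # placeholder args
    # the old single-script params remain valid for backwards compatibility
    "script_name": None,
    "script_args": [],
}
response = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
```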
Vladimir Mandic
3c6459154f
add check for resulting image size 2023-02-27 17:28:04 -05:00
Adam Huganir
6d92d95a33 git 3.1.30 api change 2023-02-25 19:15:06 +00:00
Vladimir Mandic
ed43a822b2
fix progressbar 2023-02-25 12:56:03 -05:00
missionfloyd
aa108bd02a Add lossless webp option 2023-02-24 20:57:18 -07:00
Brad Smith
b15bc73c99
sort upscalers by name 2023-02-24 14:45:08 -05:00
laksjdjf
534cf60afb
Update script_callbacks.py 2023-02-24 14:26:55 +09:00
laksjdjf
9a1435946c
Update sd_samplers_kdiffusion.py 2023-02-24 14:04:23 +09:00
laksjdjf
327186b484
Update script_callbacks.py 2023-02-24 14:03:46 +09:00
Tpinion
ac4c7f05cd Filter out temporary files that will be generated if the download fails. 2023-02-24 00:42:29 +08:00
fkunn1326@users.noreply.github.com
b90cad7f31 Add .mjs support for extensions 2023-02-23 03:29:22 +00:00
Thomas Young
6825de7bc8
Added results selector
This causes the querySelectorAll function to only select images in a results div, ignoring images that might be in an extension's gallery.
2023-02-22 15:31:49 -06:00
112292454
2fa91cbee6
Update prompt_matrix.py
1
2023-02-23 01:55:07 +08:00
112292454
2c58d373dd
Update prompt_matrix.py
The last commit to this file fixed the common situation of using both prompt matrix and high-res.
But if we just enable the matrix option without using '|', we will only get one picture, and `processed.images[0].width, processed.images[1].height` will cause an index-out-of-bounds exception
2023-02-22 21:40:42 +08:00
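A small sketch of the guard the commit implies: only index `processed.images[1]` when more than one image was produced; the helper is illustrative:

```python
def grid_cell_size(processed):
    """Illustrative guard: when the matrix option is on but '|' was not used,
    processed.images holds a single image, so indexing images[1] would raise."""
    if len(processed.images) > 1:
        return processed.images[1].width, processed.images[1].height
    return processed.images[0].width, processed.images[0].height
```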
space-nuko
a2d635ad13 Add before_process_batch script callback 2023-02-22 01:52:53 -08:00
Kilvoctu
32a4c8d961 use emojis for extra network buttons
🔄 for refresh
 for close
2023-02-20 15:14:06 -06:00
xSinStarx
b0f2653541
Fixes img2img Negative Token Counter
The img2img negative token counter is counting the txt2img negative prompt.
2023-02-20 12:39:38 -08:00
EllangoK
bab972ff8a fixes newline being detected as its own entry 2023-02-20 10:16:55 -05:00
AUTOMATIC1111
0cc0ee1bcb
Merge pull request #7945 from w-e-w/fix-image-downscale
Fix broken image downscale TypeError
2023-02-20 14:45:54 +03:00
w-e-w
f71a3c9c3a convert resolution to int using round() 2023-02-20 17:47:20 +09:00
EllangoK
ca2b8faa83 custom height, width settings for extra networks 2023-02-19 14:38:22 -05:00
AUTOMATIC
65995a2ea3 possible fix for #7804 2023-02-19 18:31:51 +03:00
Vladimir Mandic
83829471de
make ui as multiselect instead of string list 2023-02-19 09:21:44 -05:00
Vladimir Mandic
8affa42588
Merge branch 'AUTOMATIC1111:master' into hide_ui_tabs 2023-02-19 09:19:25 -05:00
AUTOMATIC1111
076d624a29
Merge pull request #7933 from w-e-w/fix-auto-sd-download
Fix issue with auto sd1.5 download
2023-02-19 15:04:03 +03:00
w-e-w
014e7323f6 when exists 2023-02-19 20:49:07 +09:00
w-e-w
c77f01ff31 fix auto sd download issue 2023-02-19 20:37:40 +09:00
AUTOMATIC1111
7202213358
Merge pull request #7571 from mezotaken/int_loop
Optionally append interrogated prompt in loopback script
2023-02-19 14:23:31 +03:00
AUTOMATIC
d84f3cf7a7 split #7300 into multiple lines 2023-02-19 13:11:48 +03:00
AUTOMATIC1111
c3d5a6ed37
Merge pull request #7300 from Ju1-js/master
Gradio Auth - Read from External File
2023-02-19 13:02:05 +03:00
AUTOMATIC
11183b4d90 fix for #6700 2023-02-19 12:44:56 +03:00
AUTOMATIC1111
e452facef4
Merge pull request #6700 from Shondoit/weighted-learning
Weighted learning of TIs and HNs
2023-02-19 12:41:35 +03:00
AUTOMATIC1111
dfb3b8f398
Merge branch 'master' into weighted-learning 2023-02-19 12:41:29 +03:00
AUTOMATIC1111
4313777322
Merge pull request #7703 from minux302/hotfix/fix_hn_api_arg
fix arg for train_hypernetwork api
2023-02-19 12:36:01 +03:00
AUTOMATIC1111
75a508ab53
Merge pull request #7430 from Einlar/6866-fix-hires-prompt-matrix
Fix prompt matrix #rows/#cols when using hires
2023-02-19 12:31:06 +03:00
AUTOMATIC1111
cfc9849f3f
Merge branch 'master' into 6866-fix-hires-prompt-matrix 2023-02-19 12:30:58 +03:00
AUTOMATIC1111
d99bd04b3f
Merge pull request #7568 from Klace/XYZ-ImageCFG
Add Image CFG Scale to XYZ Grid
2023-02-19 12:29:26 +03:00
AUTOMATIC1111
e0ced6696e
Merge pull request #7525 from w-e-w/master
Fix: show correct help message on --help
2023-02-19 12:29:09 +03:00
AUTOMATIC1111
09835363ab
Merge pull request #7572 from CurtisDS/fix-id-handle-in-extra-networks
Update ui_extra_networks.py to fix div id's that have spaces in them
2023-02-19 12:26:11 +03:00
AUTOMATIC1111
c46eea221f
Merge pull request #7573 from mezotaken/batch-tooltip
Update batch count/size hints
2023-02-19 12:25:44 +03:00
AUTOMATIC
48d171bbb3 fix incorrectly named args for gr.Slider in prompt matrix and xyz grid 2023-02-19 12:25:05 +03:00
AUTOMATIC
b908bed883 remove unneeded return from #7583 2023-02-19 12:23:40 +03:00
AUTOMATIC1111
563724f6e9
Merge pull request #7583 from EllangoK/master
Calls modules.sd_vae.refresh_vae_list(), fixes VAE list not updating
2023-02-19 12:23:06 +03:00
AUTOMATIC1111
b63a13c5ed
Merge pull request #7651 from vladmandic/missing-imports
Add missing imports
2023-02-19 12:16:58 +03:00
AUTOMATIC1111
e287d9b294
Merge pull request #7650 from vladmandic/img2img-fix
Convert image from RGBA to RGB before saving
2023-02-19 12:15:27 +03:00
AUTOMATIC
fe46a08f52 add slash to non-empty dirs in extra networks interface 2023-02-19 12:09:25 +03:00
AUTOMATIC
66cfd1dcfc Expose xyz_grid's values to other extensions for #7721 2023-02-19 11:45:04 +03:00
AUTOMATIC1111
226bc04653
Merge pull request #7637 from brkirch/fix-hypernetworks-pix2pix
Fix hypernetworks and instruct pix2pix not working with `--upcast-sampling`
2023-02-19 11:15:41 +03:00
AUTOMATIC1111
3fcc087317
Merge pull request #7731 from opparco/master
Add cfg_denoised_callback
2023-02-19 11:09:47 +03:00
AUTOMATIC1111
f04f4b28a2
Merge pull request #7925 from AUTOMATIC1111/revert-7601-aspect_ratio_sliders
Revert "Aspect ratio sliders"
2023-02-19 10:57:34 +03:00
AUTOMATIC1111
fd4ac5187a
Revert "Aspect ratio sliders" 2023-02-19 10:55:39 +03:00
AUTOMATIC1111
b20f28eea9
Merge pull request #7601 from Gerschel/aspect_ratio_sliders
Aspect ratio sliders
2023-02-19 10:53:18 +03:00
AUTOMATIC1111
9c4eaac61f
Merge pull request #7691 from missionfloyd/16bit-convert
Convert 16-bit greyscale to 8-bit when saving as JPEG
2023-02-19 10:15:49 +03:00
AUTOMATIC1111
2a4f893570
Merge pull request #7727 from missionfloyd/face-restore-setting
Fix face restorers setting
2023-02-19 10:14:42 +03:00
AUTOMATIC
fb2354cb2a reword settings for 4chan export, remove unneded try/excepts, add try/except for actually saving JPG 2023-02-19 10:12:45 +03:00
AUTOMATIC1111
e572c3ed38
Merge pull request #7564 from w-e-w/configurable_image_downscale
Configurable JPG downscale threshold
2023-02-19 10:01:42 +03:00
AUTOMATIC
1646991637 display 8 (rather than 7) characters of the extension commit hash in the installed extensions table 2023-02-19 09:54:04 +03:00
AUTOMATIC1111
d023532c55
Merge pull request #7798 from vladmandic/extensions
add version to extensions table
2023-02-19 09:52:19 +03:00
AUTOMATIC
15f4b217b1 fix a merge conflict resolution I did that entirely breaks image generation 2023-02-19 09:50:14 +03:00
AUTOMATIC1111
a77ac2eeaa
Merge pull request #7730 from CCRcmcpe/fix-dpm-sde-batch
Fix DPM++ SDE not deterministic across different batch sizes (#5210)
2023-02-19 09:38:15 +03:00
AUTOMATIC
a742facd95 make PNG info tab work properly with parameter overrides 2023-02-19 09:30:57 +03:00
AUTOMATIC1111
d7bcc942ff
Merge pull request #7868 from space-nuko/fix-save-params-2
Fix params.txt saving for infotexts modified by process_batch
2023-02-19 09:15:51 +03:00
AUTOMATIC1111
6911deb242
Merge branch 'master' into fix-save-params-2 2023-02-19 09:15:45 +03:00
AUTOMATIC
75e03785fe remove download instruction 2023-02-19 09:12:01 +03:00
AUTOMATIC1111
9f113a84eb
Merge pull request #7850 from asdfire1/instructionfix
Fixed missing part in the Linux installation instructions
2023-02-19 09:11:31 +03:00
AUTOMATIC1111
c69494673d
Merge pull request #7824 from missionfloyd/download-model
Download model if none are found
2023-02-19 09:00:48 +03:00
AUTOMATIC1111
b66b6829aa
Merge pull request #7789 from space-nuko/extra-networks-per-batch
Apply extra networks per-batch instead of per-session (fixes wildcards)
2023-02-19 08:44:40 +03:00
AUTOMATIC
b5f69ad6af simplify long version display for torch in UI 2023-02-19 08:38:38 +03:00
AUTOMATIC1111
aa7ddb8b0c
Merge pull request #7911 from vladmandic/torch-version
store and print real torch version
2023-02-19 08:33:40 +03:00
Vladimir Mandic
9c7e6d5bba
store and print real torch version 2023-02-18 11:31:02 -05:00
space-nuko
b20737815a Fix params.txt saving for infotexts modified by process_batch 2023-02-16 21:44:46 -08:00
asdfire1
9691ca5f59
Fixed the Linux installation instructions 2023-02-16 11:59:14 +01:00
missionfloyd
c4ea16a03f Add ".vae.ckpt" to ext_blacklist 2023-02-15 19:47:30 -07:00
Shondoit
edb10092de Add ability to choose using weighted loss or not 2023-02-15 10:03:59 +01:00
Shondoit
bc50936745 Call weighted_forward during training 2023-02-15 10:03:59 +01:00
Shondoit
21642000b3 Add PNG alpha channel as weight maps to data entries 2023-02-15 10:03:59 +01:00
Shondoit
c4bfd20f31 Hijack to add weighted_forward to model: return loss * weight map 2023-02-15 10:03:59 +01:00
RcINS
f55a7e04d8 Fix error when batch count > 1 2023-02-15 16:57:18 +08:00
missionfloyd
1615f786ee Download model if none are found 2023-02-14 20:54:02 -07:00
space-nuko
7df7e4d227 Allow extensions to declare paste fields for "Send to X" buttons 2023-02-14 03:55:42 -08:00
Vladimir Mandic
a320d157ec
allow hiding of ui tabs 2023-02-13 20:26:47 -05:00
Vladimir Mandic
7893533674
add version to extensions table 2023-02-13 11:04:34 -05:00
space-nuko
0a4917ac40 Apply extra networks per-batch instead of per-session (fixes wildcards) 2023-02-13 03:33:28 -08:00
missionfloyd
ceb8a4b222
Merge branch 'AUTOMATIC1111:master' into face-restore-setting 2023-02-12 23:31:19 -07:00
missionfloyd
02e52567bc
Merge branch 'AUTOMATIC1111:master' into 16bit-convert 2023-02-12 23:31:08 -07:00
AUTOMATIC1111
3715ece0ad
Merge pull request #7717 from zijiren233/master
Fix: v0.91.0 of fastapi Cannot add middleware after an application ha…
2023-02-13 08:12:51 +03:00
space-nuko
716a69237c support SD2.X models 2023-02-11 06:18:34 -08:00
Gerschel
742d86eed4
Merge pull request #2 from w-e-w/badge_style
remove Badge background and ⚠️ ->📏
2023-02-10 20:44:12 -08:00
opparco
b78c5e87ba Add cfg_denoised_callback 2023-02-11 11:18:38 +09:00
RcINS
9e27af76d1 Fix DPM++ SDE not deterministic across different batch sizes (#5210) 2023-02-11 10:12:16 +08:00
space-nuko
fb274229b2 bug fix 2023-02-10 14:30:35 -08:00
missionfloyd
bf9b1d64a3 Fix face restorers setting 2023-02-10 15:27:08 -07:00
zijiren233
4f4debbadb Fix: v0.91.0 of fastapi Cannot add middleware after an application has started 2023-02-11 00:28:20 +08:00
space-nuko
06cb0dc920 Fix UniPC order 2023-02-10 05:36:41 -08:00
space-nuko
79ffb9453f Add UniPC sampler settings 2023-02-10 05:27:05 -08:00
space-nuko
c88dcc20d4 UniPC does not support img2img (for now) 2023-02-10 05:00:54 -08:00
space-nuko
21880eb9e5 Fix logspam and live previews 2023-02-10 04:47:08 -08:00
space-nuko
1253199889 Working UniPC (for batch size 1) 2023-02-10 03:30:20 -08:00
minux302
33947a3c66 fix arg for hypernetwork train api 2023-02-10 17:58:35 +09:00
missionfloyd
73a97cac11
Use RGB for webp
Doesn't support greyscale (L)
2023-02-09 17:04:55 -07:00
w-e-w
b313221ca6 remove Badge color and ⚠️ ->📏 2023-02-10 08:34:21 +09:00
missionfloyd
463ab84180 Convert 16-bit greyscale to 8-bit when saving as JPEG 2023-02-09 02:13:49 -07:00
Gerschel
374fe636b8 Squashed commit of the following:
commit b030b67ad005bfe29bcda692238a00042dcae816
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Wed Feb 8 16:38:56 2023 -0800

    styling adjustements

commit 80a2acb0230dd77489b0eb466f2efe827a053f6d
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Wed Feb 8 10:49:47 2023 -0800

    badge indicator toggles visibility by selection

commit 898922e025a6422ac947fb45c1fa4f1109882f0a
Merge: 745382a0 31bbfa72
Author: Gerschel <9631031+Gerschel@users.noreply.github.com>
Date:   Wed Feb 8 08:35:26 2023 -0800

    Merge pull request #1 from w-e-w/Rounding-Method

    Rounding Method

commit 31bbfa729a15ef35fa1f905345d3ba2b17b26ab9
Author: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date:   Wed Feb 8 19:41:45 2023 +0900

    use switch

commit 85dbe511c33521d3ac62224bf0e0f3a48194ce63
Author: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date:   Wed Feb 8 16:47:52 2023 +0900

    Rounding Method

commit 745382a0f4b8d16241545a3460d5206915959255
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Tue Feb 7 21:19:20 2023 -0800

    default set to round

commit 728579c618af30ec98a5af0991bd3f28bdaca399
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Tue Feb 7 21:17:03 2023 -0800

    cleaned some commented code out; added indicator

commit 5b288c24a1edd8a5c2f35214b9634316d05b8dae
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Tue Feb 7 18:19:00 2023 -0800

    needs cleaning; attempt at rounding

commit d9f18ae92b929576b0b8c5f1ef8b3b38e441e381
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Tue Feb 7 15:46:25 2023 -0800

    add rounding option in setting for aspect ratio

commit af22106802c9e42205649e4c71c23fcf5b8c62f6
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Tue Feb 7 13:18:45 2023 -0800

    added some ratios, sorted ratios by commonality

commit 11e2fba73cffe8cdbf4cd0860641b94428ca0e74
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Tue Feb 7 10:46:53 2023 -0800

    snaps to mulitples of 8 and along ratio

commit fa00387e07460b10ee82671a1bfa8687e00ee60b
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Mon Feb 6 14:54:59 2023 -0800

    updated slidercomponentcontroller

commit 8059bc111c3e2d1edb3314e05ab21b65120fa1dd
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Mon Feb 6 14:29:11 2023 -0800

    added step size adjustment on number field

commit 641157b9f27a874a24ee7b0a854a092e9eac3eec
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Mon Feb 6 14:12:03 2023 -0800

    added return step size to default when ratio is disabled

commit 5fb75ad28f2476f36100ec93922a8199adbd2a68
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Mon Feb 6 14:09:34 2023 -0800

    added step size adjustment

commit e33532883bc4709cd41c3775cbb646d1d5ab0584
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Mon Feb 6 11:56:15 2023 -0800

    adjusted dropdown size, padding, text-align

commit 81937329cee77f466c5a5b23c268d0c810128f84
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Mon Feb 6 11:39:57 2023 -0800

    added positioning and styling

commit 86eb4583782d92880a9a113a54ffbac9d92f3753
Author: Gerschel <Gerschel_Payne@hotmail.com>
Date:   Mon Feb 6 08:54:45 2023 -0800

    fix typo in defaults; added preventDefault in event
2023-02-08 18:57:32 -08:00
Vladimir Mandic
3ca41dbded
add missing import
used later in line 70
2023-02-08 07:10:13 -05:00
Vladimir Mandic
3ee9ca5cb0
add missing import
used later in line 418
2023-02-08 07:08:09 -05:00
Vladimir Mandic
4c562a9832
convert rgba to rgb
some image formats (e.g. jpg) do not support rgba
2023-02-08 07:03:36 -05:00
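A minimal PIL sketch of the conversion described, since formats such as JPEG cannot store an alpha channel:

```python
from PIL import Image

def save_as_jpeg(image: Image.Image, path: str):
    # JPEG has no alpha channel, so convert RGBA (or other modes) to RGB first.
    if image.mode != "RGB":
        image = image.convert("RGB")
    image.save(path, format="JPEG")
```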
brkirch
2016733814 Apply hijacks in ddpm_edit for upcast sampling
To avoid import errors, ddpm_edit hijacks are done after an instruct pix2pix model is loaded.
2023-02-07 22:53:45 -05:00
brkirch
4738486d8f Support for hypernetworks with --upcast-sampling 2023-02-06 18:10:55 -05:00
Gerschel
5d483bf307 aspect ratio for dim's; sliders adjust by ratio
Default choices added to settings in user interface section
Choices are editable by user

User selects from dropdown.
When you move one slider, the other adjusts according to the ratio
chosen.
Vice versa for the other slider.

Number fields for changes work as well.

For disabling the ratio, an unlock padlock "🔓" is available as the default.
This string can be changed to anything to serve as the disable value,
as long as it contains no colon ":".

Ratios are entered as floats or ints separated by a colon, e.g. "1:1".
The string is split at the colon, and the left and right parts are parsed as
floats to perform the math.
2023-02-06 08:18:04 -08:00
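A minimal sketch of the ratio parsing described above (split on the colon, parse both sides as floats, treat a colon-free string such as the "🔓" entry as "ratio disabled"); the function name is illustrative:

```python
def parse_aspect_ratio(text):
    """Returns width/height as a float, or None when the selection disables the ratio."""
    if ":" not in text:          # e.g. the "🔓" unlock entry
        return None
    left, right = text.split(":", 1)
    return float(left) / float(right)

print(parse_aspect_ratio("16:9"))   # 1.777...
print(parse_aspect_ratio("🔓"))     # None -> ratio disabled
```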
EllangoK
9a22c63f47 call modules.sd_vae.refresh_vae_list() 2023-02-06 00:52:31 -05:00
Vladimir Repin
df8ee5f6b0 Update batch count/size hints 2023-02-06 00:52:57 +03:00
CurtisDS
584f782391
Update ui_extra_networks.py
update the string used to build the ID handle to replace spaces with underscore
2023-02-05 16:42:45 -05:00
Vladimir Repin
7dd23973f7 Optionally append interrogated prompt in loopback script 2023-02-06 00:28:31 +03:00
Kyle
67303fd5fc Img2Img Only
Will still show up as an option with regular img2img models, but outputs no changes.
2023-02-05 15:34:26 -05:00
Kyle
c8109f0dea Add Image CFG Scale to XYZ Grid 2023-02-05 15:18:18 -05:00
w-e-w
fe33be6cac use Default if ValueError 2023-02-05 23:33:05 +09:00
w-e-w
6d11cda418 configurable image downscale
allowing the user to configure the image downscale parameters in setting
2023-02-05 23:12:42 +09:00
w-e-w
47b298d58a
Merge branch 'AUTOMATIC1111:master' into master 2023-02-05 22:02:30 +08:00
AUTOMATIC1111
ea9bd9fc74
Merge pull request #7556 from EllangoK/master
Adds options for grid margins to XYZ Plot and Prompt Matrix
2023-02-05 13:34:36 +03:00
EllangoK
0ca1a64cfc adds grid margins to xyz plot and prompt matrix 2023-02-05 03:44:56 -05:00
AUTOMATIC1111
3993aa43e9
Merge pull request #7535 from mcmonkey4eva/fix-symlink-extra-network
fix symlinks in extra networks ui
2023-02-05 11:28:30 +03:00
AUTOMATIC1111
27a50d4b38
Merge pull request #7554 from techneconn/feature/prompt_hash_option
Add prompt_hash option for file/dir name pattern
2023-02-05 11:27:05 +03:00
AUTOMATIC1111
475095f50a
Merge pull request #7528 from spezialspezial/patch-1
Catch broken model symlinks early | Quickfix modelloader.py
2023-02-05 11:24:32 +03:00
AUTOMATIC
668d7e9b9a make it possible to load SD1 checkpoints without CLIP 2023-02-05 11:21:00 +03:00
techneconn
5a1b62e9f8 Add prompt_hash option for file/dir name pattern 2023-02-05 15:48:51 +09:00
Alex "mcmonkey" Goodwin
88a46e8427 fix symlinks in extra networks ui
'absolute' and 'resolve' are equivalent, but 'resolve' resolves symlinks (which is an obscure specialty behavior usually not wanted) whereas 'absolute' treats symlinks as folders (which is the expected behavior). This commit allows you to symlink folders within your models/embeddings/etc. dirs and have preview images load as expected without issue.
2023-02-04 09:10:00 -08:00
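A small standard-library illustration of the absolute/resolve distinction this fix relies on; the path below is hypothetical.

```python
from pathlib import Path

# hypothetical preview image sitting inside a symlinked folder under models/
p = Path("models/Lora/linked-folder/preview.png")

# resolve() follows symlinks, so the result may point outside the models tree
print(p.resolve())

# absolute() only prepends the current working directory and keeps symlink
# components as-is, which is what the extra networks UI expects here
print(p.absolute())
```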
spezialspezial
6524478850
Update modelloader.py
os.path.getmtime(filename) throws an exception later in the codepath when it meets a broken symlink. For now, catch it here early, but more checks could be added for robustness.
2023-02-04 16:52:15 +01:00
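A sketch of the kind of early check described above, with assumed names; this is not the actual modelloader.py code.

```python
import os


def usable_model_files(paths):
    """Yield only paths whose mtime can be read, skipping broken symlinks."""
    for filename in paths:
        try:
            os.path.getmtime(filename)  # raises OSError for a broken symlink
        except OSError as e:
            print(f"skipping {filename}: {e}")
            continue
        yield filename
```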
w-e-w
dd20fc0fda fix --help
show correct help message
2023-02-04 23:23:20 +09:00
AUTOMATIC
3e0f9a7543 fix issue with switching back to checkpoint that had its checksum calculated during runtime mentioned in #7506 2023-02-04 15:23:16 +03:00
AUTOMATIC
40e51fd6ef add margin parameter to draw_grid_annotations 2023-02-04 13:29:04 +03:00
AUTOMATIC1111
21593c8082
Merge pull request #7466 from ctwrs/master
Add .jpg to allowed thumb formats
2023-02-04 12:07:45 +03:00
AUTOMATIC1111
c0e0b5844d
Merge pull request #7470 from cbrownstein-lambda/update-error-message-no-checkpoint
Update error message WRT missing checkpoint file
2023-02-04 12:07:12 +03:00
AUTOMATIC1111
dca632ab90
Merge pull request #7509 from mezotaken/fix-img2imgalt
Fix img2imgalt after samplers separation
2023-02-04 11:41:29 +03:00
AUTOMATIC
81823407d9 add --no-hashing 2023-02-04 11:38:56 +03:00
AUTOMATIC1111
30228c67ca
Merge pull request #7461 from brkirch/mac-fixes
Move Mac related code to separate file
2023-02-04 11:22:52 +03:00
AUTOMATIC
c4b9ed1a27 make Image CFG Scale only show if instrutpix2pix model is loaded 2023-02-04 11:18:44 +03:00
AUTOMATIC
72dd5785d9 merge CFGDenoiserEdit and CFGDenoiser into single object 2023-02-04 11:06:17 +03:00
brkirch
4306659c4d Remove unused code 2023-02-04 01:22:06 -05:00
AUTOMATIC1111
127bfb6c41
Merge pull request #7481 from Klace/master
img2img instruct-pix2pix support
2023-02-04 09:05:21 +03:00
Kyle
ba6a4e7e94 Use original CFGDenoiser if image_cfg_scale = 1
If image_cfg_scale is 1, the original image is not used for the output. We can then use the original CFGDenoiser to get the same result and keep AND functionality (sketched below).

Maybe in the future AND can be supported with "Image CFG Scale"
2023-02-03 19:46:13 -05:00
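A hedged sketch of the selection logic this message describes; the two classes below are stand-in stubs, not the web UI's real CFGDenoiser implementations.

```python
class CFGDenoiser:  # stand-in stub for the regular denoiser
    def __init__(self, model):
        self.model = model


class CFGDenoiserEdit(CFGDenoiser):  # stand-in stub for the instruct-pix2pix denoiser
    def __init__(self, model, image_cfg_scale):
        super().__init__(model)
        self.image_cfg_scale = image_cfg_scale


def pick_denoiser(model, image_cfg_scale):
    # with image_cfg_scale == 1 the original image has no effect on the output,
    # so the regular denoiser gives the same result and keeps AND prompt support
    if image_cfg_scale == 1:
        return CFGDenoiser(model)
    return CFGDenoiserEdit(model, image_cfg_scale)
```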
Kyle
c27c0de0f7 txt2img Hires Fix 2023-02-03 19:15:32 -05:00
Kyle
6c6c6636bb Image CFG Added (Full Implementation)
Uses separate denoiser for edit (instruct-pix2pix) models

No impact to txt2img or regular img2img

"Image CFG Scale" will only apply to instruct-pix2pix models and metadata will only be added if using such model
2023-02-03 18:19:56 -05:00
Vladimir Repin
982295aee5 Fix img2imgalt after samplers separation 2023-02-04 01:50:38 +03:00
Kyle
3b2ad20ac1 Processing only, no CFGDenoiser change
Allows instruct-pix2pix
2023-02-02 19:19:45 -05:00
Kyle
cf0cfefe91 Revert "instruct-pix2pix support"
This reverts commit 269833067d.
2023-02-02 19:15:38 -05:00
Kyle
269833067d instruct-pix2pix support 2023-02-02 09:37:01 -05:00
Cody Brownstein
fb97acef63 Update error message WRT missing checkpoint file
The Safetensors format is also supported.
2023-02-01 14:51:06 -08:00
ctwrs
92bae77b88 Add .jpg to allowed thumb formats 2023-02-01 22:28:39 +01:00
Francesco Manzali
5afd9e82c3 Use the real images size, not the process
- Use the width/height of the first image in processed.images
- No more need for rounding in prompt_matrix
2023-02-01 21:16:52 +01:00
brkirch
1b8af15f13 Refactor Mac specific code to a separate file
Move most Mac related code to a separate file, don't even load it unless web UI is run under macOS.
2023-02-01 14:05:56 -05:00
AUTOMATIC1111
226d840e84
Merge pull request #7334 from EllangoK/master
X/Y/Z plot now saves sub grids if opts.grid_save and honors draw_legend
2023-02-01 16:30:28 +03:00
AUTOMATIC1111
07edf57409
Merge pull request #7357 from EllangoK/btn-fix
Fixes switch height/width btn unbound error
2023-02-01 16:29:58 +03:00
AUTOMATIC1111
fa4fe45403
Merge pull request #7371 from hoblin/master
[Prompt Matrix] Support for negative prompt + delimiter selector
2023-02-01 16:28:27 +03:00
AUTOMATIC1111
814600f298
Merge pull request #7412 from Pomierski/master
Fix missing tooltip for 'Clear prompt' button
2023-02-01 16:22:36 +03:00
AUTOMATIC1111
30a64504b1
Merge pull request #7414 from joecodecreations/master
Changes use_original_name_batch to default to True
2023-02-01 16:22:16 +03:00
AUTOMATIC1111
b1873dbb77
Merge pull request #7455 from brkirch/put-fix-back
Refactor MPS PyTorch fixes, add fix still required for PyTorch nightly builds back
2023-02-01 16:11:40 +03:00
brkirch
2217331cd1 Refactor MPS fixes to CondFunc 2023-02-01 06:36:22 -05:00
brkirch
7738c057ce MPS fix is still needed :(
Apparently I did not test with large enough images to trigger the bug with torch.narrow on MPS
2023-02-01 05:23:58 -05:00
Francesco Manzali
17b24e45e8 Fix prompt matrix #rows/#cols when using hires
- images.draw_prompt_matrix() should be called with the final width/height
  of the generated images, after upscaling.
  Otherwise, the number of rows/cols computed in images.draw_grid_annotations
  will increase by the upscaling factor.
- Round the number of cols/rows in images.draw_grid_annotations, since
  the final images width may be a bit less than the required
  hr_upscale_to_x/y
2023-01-31 18:58:36 +01:00
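A minimal sketch of the rounding point made above, with assumed variable names; it is not the actual images.draw_grid_annotations code.

```python
def grid_cols(grid_width: int, image_width: int) -> int:
    # the upscaled images can be a few pixels narrower than the requested
    # hr_upscale_to_x, so integer division may undercount; round instead
    return max(1, round(grid_width / image_width))


print(grid_cols(1535, 512))  # 3, even though 1535 // 512 == 2
```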
Joey Sanchez
0426b34789 Adding default True to use_original_name_batch, as images should by default keep the same name to help keep sequenced images in their correct order 2023-01-30 21:46:52 -05:00
Piotr Pomierski
bfe7e7f15f Fix missing tooltip for 'Clear prompt' button 2023-01-31 01:51:07 +01:00
AUTOMATIC
2c1bb46c7a amend the error in previous commit 2023-01-30 18:48:10 +03:00
AUTOMATIC
19de2a626b make linux launch.py use XFORMERS_PACKAGE var too; thanks, acncagua 2023-01-30 15:48:09 +03:00
AUTOMATIC
ee9fdf7f62 Add --skip-version-check to disable messages asking users to upgrade torch. 2023-01-30 14:56:28 +03:00
AUTOMATIC
aa4688eb83 disable EMA weights for instructpix2pix model, which should bring memory usage as well as image quality back to what it was before d2ac95fa7b 2023-01-30 13:29:44 +03:00
AUTOMATIC
ab059b6e48 make the program read Discard penultimate sigma from generation parameters 2023-01-30 10:52:15 +03:00
AUTOMATIC
040ec7a80e make the program read Eta and Eta DDIM from generation parameters 2023-01-30 10:47:09 +03:00
AUTOMATIC
4df63d2d19 split samplers into one more file for k-diffusion 2023-01-30 10:11:30 +03:00
Andrey
274474105a Split history sd_samplers.py to sd_samplers_kdiffusion.py 2023-01-30 09:51:23 +03:00
Andrey
95916e3777 Split history sd_samplers.py to sd_samplers_kdiffusion.py 2023-01-30 09:51:23 +03:00
Andrey
2db8ed32cd Split history sd_samplers.py to sd_samplers_kdiffusion.py 2023-01-30 09:51:23 +03:00
Andrey
f4d0538bf2 Split history sd_samplers.py to sd_samplers_kdiffusion.py 2023-01-30 09:51:23 +03:00
AUTOMATIC
aa54a9d416 split compvis sampler and shared sampler stuff into their own files 2023-01-30 09:51:06 +03:00
Andrey
f8fcad502e Split history sd_samplers.py to sd_samplers_common.py 2023-01-30 09:37:51 +03:00
Andrey
58ae93b954 Split history sd_samplers.py to sd_samplers_common.py 2023-01-30 09:37:50 +03:00
Andrey
6e78f6a896 Split history sd_samplers.py to sd_samplers_common.py 2023-01-30 09:37:50 +03:00
Andrey
5feae71dd2 Split history sd_samplers.py to sd_samplers_common.py 2023-01-30 09:37:50 +03:00
Andrey
449531a6c5 Split history sd_samplers.py to sd_samplers_compvis.py 2023-01-30 09:35:53 +03:00
Andrey
9b8ed7f8ec Split history sd_samplers.py to sd_samplers_compvis.py 2023-01-30 09:35:53 +03:00
Andrey
9118b08606 Split history sd_samplers.py to sd_samplers_compvis.py 2023-01-30 09:35:52 +03:00
Andrey
0c7c36a6c6 Split history sd_samplers.py to sd_samplers_compvis.py 2023-01-30 09:35:52 +03:00
AUTOMATIC
cbd6329488 add an environment variable for selecting xformers package 2023-01-30 09:12:43 +03:00
AUTOMATIC
c81b52ffbd add override settings component to img2img 2023-01-30 02:40:26 +03:00
AUTOMATIC
847ceae1f7 make it possible to search checkpoint by its hash 2023-01-30 01:41:23 +03:00
AUTOMATIC
399720dac2 update prompt token counts after using the paste params button 2023-01-30 01:03:31 +03:00
AUTOMATIC
f91068f426 change disable_weights_auto_swap to true by default 2023-01-30 00:37:26 +03:00
AUTOMATIC
938578e8a9 make it so that setting options in pasted infotext (like Clip Skip and ENSD) do not get applied directly and instead are added as temporary overrides 2023-01-30 00:25:30 +03:00
Yevhenii Hurin
1e2b10d2dc Cleanup changes made by formatter 2023-01-29 17:14:46 +02:00
Yevhenii Hurin
5997457fd4 Compact options UI for Prompt Matrix 2023-01-29 16:23:29 +02:00
Yevhenii Hurin
edabd92729 Add delimiter selector to the Prompt Matrix script 2023-01-29 16:05:59 +02:00
Yevhenii Hurin
c46f3ad98b Merge branch 'master' of https://github.com/AUTOMATIC1111/stable-diffusion-webui 2023-01-29 15:47:14 +02:00
Yevhenii Hurin
7c53f81caf Prompt selector for Prompt Matrix script 2023-01-29 15:29:03 +02:00
AUTOMATIC
00dab8f10d remove Batch size and Batch pos from textinfo (goodbye) 2023-01-29 11:53:24 +03:00
AUTOMATIC
aa6e55e001 do not display the message for TI unless the list of loaded embeddings changed 2023-01-29 11:53:05 +03:00
EllangoK
920fe8057c fixes #7284 btn unbound error 2023-01-29 03:36:16 -05:00
AUTOMATIC
8d7382ab24 add buttons for auto-search in subdirectories for extra tabs 2023-01-29 11:34:58 +03:00
AUTOMATIC
7cb31a278e initial work on SD2 Lora support 2023-01-29 10:45:46 +03:00
AUTOMATIC1111
e8efd2ec47
Merge pull request #7353 from EllangoK/preview-fix
Fixes thumbnail cards not loading the preview image
2023-01-29 10:41:36 +03:00
EllangoK
659d602dce only returns ckpt directories if they are not none 2023-01-29 02:32:53 -05:00
AUTOMATIC
f6b7768f84 support for searching subdirectory names for extra networks 2023-01-29 10:20:19 +03:00
AUTOMATIC1111
1d24665229
Merge pull request #7344 from glop102/master
Reduce grid rows if larger than number of images available
2023-01-29 09:29:23 +03:00
glop102
09a142a05a Reduce grid rows if larger than number of images available
When a set number of grid rows is specified in settings, it can lead
to situations where an entire row in the grid is empty.
The most noticeable example is the processing preview when the row count
is set to 2, where it shows the preview just fine but with a black
rectangle under it.
2023-01-28 19:25:52 -05:00
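A sketch of the row-reduction idea described above, assuming the grid helper knows the image count and the configured row count; names are illustrative.

```python
def effective_rows(n_images: int, configured_rows: int) -> int:
    # a grid never needs more rows than it has images, otherwise the
    # last rows render as empty black strips (e.g. the live preview case)
    return max(1, min(configured_rows, n_images))


print(effective_rows(1, 2))  # 1: a single preview image with a row setting of 2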
EllangoK
fb58fa6240 xyz plot now saves sub grids if opts.grid_save
also fixed no draw legend for z grid
2023-01-28 15:37:01 -05:00
AUTOMATIC
0a8515085e make it so that clicking on hypernet/lora card one more time removes the related from the prompt 2023-01-28 23:31:48 +03:00
AUTOMATIC
1d8e06d542 add checkpoints tab for extra networks UI 2023-01-28 22:52:27 +03:00
AUTOMATIC
2abd89acc6 index on master: 91c8d0d Merge pull request #7231 from EllangoK/master 2023-01-28 20:04:35 +03:00
AUTOMATIC1111
91c8d0dcfc
Merge pull request #7231 from EllangoK/master
Fixes X/Y/Z Plot parameters not being restored from images
2023-01-28 18:45:38 +03:00
AUTOMATIC1111
fecb990deb
Merge pull request #7309 from brkirch/fix-embeddings
Fix embeddings, upscalers, and refactor `--upcast-sampling`
2023-01-28 18:44:36 +03:00
AUTOMATIC1111
41e76d1209
Merge pull request #7258 from ItsOlegDm/master
Css fixes
2023-01-28 18:41:58 +03:00
ItsOlegDm
29d2d6a094 Train tab fix 2023-01-28 17:21:59 +02:00
AUTOMATIC
e2c71a4bd4 prevent the browser from using a cached version of scripts when they change 2023-01-28 18:13:03 +03:00
ItsOlegDm
1e22f48f4d img2img styled padding fix 2023-01-28 17:08:38 +02:00
ItsOlegDm
f4eeff659e Removed buttons centering 2023-01-28 17:05:08 +02:00
EllangoK
591b68e56c uses autos new regex, checks len of re_param 2023-01-28 10:04:09 -05:00
AUTOMATIC1111
cd7e8fb42b
Merge pull request #7319 from Thurion/img2img_batch_fix
Fix error when using img2img batch without masks
2023-01-28 17:31:39 +03:00
AUTOMATIC
b7d2af8c7f add dropdowns in settings for hypernets and loras 2023-01-28 17:18:47 +03:00
Thurion
1421e95960
allow empty mask dir 2023-01-28 14:42:24 +01:00
AUTOMATIC
5d14f282c2 fixed a bug where after switching to a checkpoint with an unknown hash, you'd get empty space instead of the checkpoint name in the UI
fixed a bug where if you update a selected checkpoint on disk and then restart the program, a different checkpoint loads, but the name shown is for the old one.
2023-01-28 16:23:49 +03:00
AUTOMATIC
f8feeaaedb add progressbar to extension update check; do not check for updates for disabled extensions 2023-01-28 15:57:56 +03:00
AUTOMATIC
d04e3e921e automatically detect v-parameterization for SD2 checkpoints 2023-01-28 15:24:41 +03:00
AUTOMATIC
4aa7f5b5b9 update image parameters regex for #7231 2023-01-28 15:24:40 +03:00
brkirch
f9edd578e9 Remove MPS fix no longer needed for PyTorch
The torch.narrow fix was required for nightly PyTorch builds for a while to prevent a hard crash, but newer nightly builds don't have this issue.
2023-01-28 04:16:27 -05:00
brkirch
02b8b957d7 Add --no-half-vae to default macOS arguments
Apparently the version of PyTorch macOS users are currently at doesn't always handle half precision VAEs correctly. We will probably want to update the default PyTorch version to 2.0 when it comes out which should fix that, and at this point nightly builds of PyTorch 2.0 are going to be recommended for most Mac users. Unfortunately someone has already reported that their M2 Mac doesn't work with the nightly PyTorch 2.0 build currently, so we can add --no-half-vae for now and give users that can install nightly PyTorch 2.0 builds a webui-user.sh configuration that overrides the default.
2023-01-28 04:16:27 -05:00
brkirch
ada17dbd7c Refactor conditional casting, fix upscalers 2023-01-28 04:16:25 -05:00
AUTOMATIC1111
e8a41df49f
Merge pull request #7217 from mezotaken/master
Ask user to clarify conditions
2023-01-28 10:52:53 +03:00
AUTOMATIC1111
bea31e849a
Merge pull request #7240 from Unstackd/master
Allow users to convert models to Instruct-pix2pix models by supporting merging Instruct-pix2pix models with regular ones in the merge tab
2023-01-28 10:52:28 +03:00
AUTOMATIC1111
60061eb8d4
Merge pull request #7303 from szhublox/pathshelp
don't replace regular --help with new paths.py parser help
2023-01-28 10:48:33 +03:00
AUTOMATIC
bd52a6d899 some more changes for python version warning; add a commandline flag to disable 2023-01-28 10:48:08 +03:00
Mackerel
3752aad23d don't replace regular --help with new paths.py parser help 2023-01-28 02:44:12 -05:00
AUTOMATIC
7d1f2a3a49 remove waiting for input on version mismatch warning, change supported versions 2023-01-28 10:21:31 +03:00
AUTOMATIC1111
28c4c9b907
Merge pull request #7200 from Spaceginner/master
Add a Python version check
2023-01-28 10:13:56 +03:00
Ju1-js
dc25a31d1a Gradio Auth Read from External File
Usage: `--gradio-auth-path {PATH}`
It adds the credentials to the already existing `--gradio-auth` credentials. It can also handle line breaks.
The file should look like:
`{u1}:{p1},{u2}:{p2}`
or
```
{u1}:{p1},
{u2}:{p2}
```
Will Gradio handle duplicate credentials if that happens?
2023-01-27 22:43:10 -08:00
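A sketch of parsing the credential file format shown above; the function name is made up, and how the result gets merged with the existing --gradio-auth values is an assumption.

```python
def read_gradio_auth_file(path: str):
    """Parse `user:password` pairs separated by commas and/or line breaks."""
    with open(path, encoding="utf8") as f:
        text = f.read()
    pairs = []
    for chunk in text.replace("\n", ",").split(","):
        chunk = chunk.strip()
        if not chunk:
            continue  # skip empty entries left by trailing commas or blank lines
        user, _, password = chunk.partition(":")
        pairs.append((user, password))
    return pairs
```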
AUTOMATIC1111
ce72af87d3
Merge pull request #7199 from maxaudron/feature/configurable-data-dir
Add flag to store user data separate from source code
2023-01-28 09:24:40 +03:00
AUTOMATIC
0834d4ce37 simplify #7284 2023-01-28 08:41:15 +03:00
AUTOMATIC1111
c99d705e57
Merge pull request #7284 from Gazzoo-byte/patch-1
Add button to switch width and height
2023-01-28 08:33:43 +03:00
AUTOMATIC1111
38d83665d9
Merge pull request #7285 from EllangoK/xyz-fixes
Allows for multiple Styles axes in X/Y/Z Plot
2023-01-28 08:31:23 +03:00
AUTOMATIC
4c52dfe4ac make the detection for -v models less broad 2023-01-28 08:30:17 +03:00
AUTOMATIC1111
41975c375c
Merge pull request #7294 from MrCheeze/model-detection
add v2-inpainting model detection, and broaden v-model detection to include anything with 768 in the name
2023-01-28 08:29:01 +03:00
AUTOMATIC1111
8ce0ccf336
Merge pull request #7295 from askaliuk/askaliuk-inpaint-batch-support
Basic inpainting batch support
2023-01-28 08:27:37 +03:00
Andrii Skaliuk
2aac1d9778 Basic inpainting batch support
Modifies batch UI to add optional inpainting support
2023-01-27 17:32:31 -08:00
MrCheeze
6b82efd737 add v2-inpainting model detection, and broaden v-model detection to include anything with 768 in the name 2023-01-27 20:06:19 -05:00
AUTOMATIC
cc8c9b7474 fix broken calls to find_checkpoint_config 2023-01-27 22:43:08 +03:00
EllangoK
32d389ef0f changes remaining text from X/Y -> X/Y/Z 2023-01-27 14:04:23 -05:00
EllangoK
a6a5bfb155 deepcopy pc.styles, allows for multiple style axis 2023-01-27 13:48:39 -05:00
Gazzoo-byte
eafaf14167
Add button to switch width and height
Adds a button to switch width and height, allowing quick and easy switching between landscape and portrait.
2023-01-27 18:34:41 +00:00
Max Audron
23a9d5e273 create user extensions directory if not exists 2023-01-27 14:44:34 +01:00
Max Audron
6b3981c068 clean up unused script_path imports 2023-01-27 14:44:34 +01:00
Max Audron
14c0884fd0 use python importlib to load and execute extension modules
previously module attributes like __file__ were not set correctly,
leading to scripts getting the directory of the stable-diffusion repo
location instead of their own script.

This causes problems when loading user data from an external location
using the --data-dir flag, as extensions would look for their own code
in the stable-diffusion repo location instead of the data dir location.

Using Python's importlib functions sets the module specs correctly and
executes them. But this will break extensions if they build paths based
on the previously incorrect __file__ attribute.
2023-01-27 14:44:34 +01:00
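A minimal sketch of loading a script via importlib so that module attributes like __file__ point at the extension's own file; names are placeholders, not the web UI's actual loader.

```python
import importlib.util
import os


def load_extension_module(script_path: str):
    """Load a Python file as a module with a correctly populated spec and __file__."""
    module_name = os.path.splitext(os.path.basename(script_path))[0]
    spec = importlib.util.spec_from_file_location(module_name, script_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # __file__ now points at script_path
    return module
```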
Max Audron
5eee2ac398 add data-dir flag and set all user data directories based on it 2023-01-27 14:44:30 +01:00
Spaceginner
56c83e453a
Merge remote-tracking branch 'origin/master' 2023-01-27 17:35:54 +05:00
Spaceginner
9ecf1e827c
Made it only a warning 2023-01-27 17:35:24 +05:00
Ivan
63391419c1
Merge branch 'AUTOMATIC1111:master' into master 2023-01-27 17:21:48 +05:00
AUTOMATIC
9beb794e0b clarify the option to disable NaN check. 2023-01-27 13:08:00 +03:00
AUTOMATIC
6f31d2210c support detecting midas model
fix broken api for checkpoint list
2023-01-27 11:54:19 +03:00
AUTOMATIC
d2ac95fa7b remove the need to place configs near models 2023-01-27 11:28:12 +03:00
ItsOlegDm
a43fafb481 css fixes 2023-01-26 23:25:48 +02:00
AUTOMATIC
7a14c8ab45 add an option to enable sections from extras tab in txt2img/img2img
fix some style inconsistencies
2023-01-26 23:31:32 +03:00
ULTRANOX\Chris
cdc2fa209a Changed filename addition from "instrpix2pix" to the more readable ".instruct-pix2pix" for newly generated instruct pix2pix models. 2023-01-26 11:27:07 -05:00
brkirch
c4b9b07db6 Fix embeddings dtype mismatch 2023-01-26 09:00:15 -05:00
AUTOMATIC1111
645f4e7ef8
Merge pull request #7234 from brkirch/fix-full-previews
Fix full previews and --no-half-vae to work correctly with --upcast-sampling
2023-01-26 14:48:43 +03:00
ULTRANOX\Chris
9e72dc7434 Changed all references to "pix2pix" to the more precise name "instruct pix2pix". Also changed extension to instrpix2pix at least for now. 2023-01-26 06:05:40 -05:00
ULTRANOX\Chris
f90798c6b6 Added error check for the rare case a user merges a pix2pix model with a normal model using weighted sum. Also removed bad print message that interfered with merging progress bar. 2023-01-26 04:38:04 -05:00
ULTRANOX\Chris
f4ec411f2c Allow checkpoint merger to merge pix2pix models in the same way that it currently supports inpainting models. 2023-01-26 03:45:16 -05:00
Spaceginner
1619233a74
Only Linux will have max 3.11 2023-01-26 12:52:44 +05:00
brkirch
10421f93c3 Fix full previews, --no-half-vae 2023-01-26 01:43:35 -05:00
EllangoK
4d634dc592 adds components to infotext_fields
allows for loading script params
2023-01-26 00:18:41 -05:00
EllangoK
e57b5f7c55 re_param captures quotes with commas properly
and removes unnecessary regex
2023-01-25 22:36:14 -05:00
Vladimir Repin
d82d471bf7 Ask user to clarify conditions 2023-01-26 02:52:33 +03:00
AUTOMATIC
6cff440182 fix prompt editing break after first batch in img2img 2023-01-25 23:25:40 +03:00
AUTOMATIC
d1d6ce2983 add edit_image_conditioning from my earlier edits in case there's an attempt to integrate pix2pix properly
this allows using the pix2pix model in img2img, though it won't work well this way
2023-01-25 23:25:25 +03:00
AUTOMATIC1111
3cead6983e
Merge pull request #7197 from mcmonkey4eva/fix-ti-symlinks
allow symlinks in the textual inversion embeddings folder
2023-01-25 22:59:12 +03:00
AUTOMATIC1111
a85e22a127
Merge pull request #7201 from brkirch/update-macos-defaults
Update default Mac command line arguments to use --upcast-sampling instead of --no-half
2023-01-25 22:57:17 +03:00
brkirch
e0df864b8c Update arguments to use --upcast-sampling 2023-01-25 13:19:06 -05:00
Spaceginner
f5d73b6a66
Fixed typo 2023-01-25 22:56:09 +05:00
Spaceginner
0cc5f380d5
even more clarifications(?)
i have no idea what commit message should be
2023-01-25 22:41:51 +05:00
Spaceginner
2de99d62dd
some clarification 2023-01-25 22:38:28 +05:00
Ivan
dc0f05c57c
Merge branch 'AUTOMATIC1111:master' into master 2023-01-25 22:34:19 +05:00
Spaceginner
57096823fa
Remove a stacktrace from an assertion to not scare people 2023-01-25 22:33:35 +05:00
AUTOMATIC
15e89ef0f6 fix for unet hijack breaking the train tab 2023-01-25 20:11:01 +03:00
Ivan
2d92d05ca2
Merge branch 'AUTOMATIC1111:master' into master 2023-01-25 22:10:34 +05:00
Spaceginner
e425b9812b
Added Python version check 2023-01-25 22:07:48 +05:00
AUTOMATIC
789d47f832 make clicking extra networks button one more time close the extra networks UI 2023-01-25 19:55:31 +03:00
Alex "mcmonkey" Goodwin
e179b6098a allow symlinks in the textual inversion embeddings folder 2023-01-25 08:48:40 -08:00
AUTOMATIC
635499e832 add pix2pix credits 2023-01-25 19:42:26 +03:00
AUTOMATIC1111
1574e96729
Merge pull request #6510 from brkirch/unet16-upcast-precision
Add upcast options, full precision sampling from float16 UNet and upcasting attention for inference using SD 2.1 models without --no-half
2023-01-25 19:12:29 +03:00
AUTOMATIC1111
1982ef6890
Merge pull request #7138 from mykeehu/patch-4
Fix extra network thumbs label color
2023-01-25 18:59:11 +03:00
AUTOMATIC
57c1baa774 change to code for live preview fix on OSX to be bit more obvious 2023-01-25 18:56:23 +03:00
AUTOMATIC1111
23dafe6d86
Merge pull request #7151 from brkirch/fix-approx-nn
Fix Approx NN previews changing first generation result
2023-01-25 18:48:25 +03:00
AUTOMATIC1111
11485659dc
Merge pull request #7195 from Klace/instruct-pix2pix_model_load
Add instruct-pix2pix hijack
2023-01-25 18:33:15 +03:00
Kyle
bd9b55ee90 Update requirements transformers==4.25.1
Update requirement for transformers to version 4.25.1 to allow instruct-pix2pix demo code to work
2023-01-25 09:41:41 -05:00
Kyle
ee0a0da324 Add instruct-pix2pix hijack
Allows loading instruct-pix2pix models via same method as inpainting models in sd_models.py and sd_hijack_ip2p.py

Adds ddpm_edit.py necessary for instruct-pix2pix
2023-01-25 08:53:23 -05:00
AUTOMATIC1111
d5ce044bcd
Merge pull request #7146 from EllangoK/master
Adds X/Y/Z Grid Script
2023-01-25 11:56:26 +03:00
AUTOMATIC
1bfec873fa add an experimental option to apply loras to outputs rather than inputs 2023-01-25 11:29:46 +03:00
brkirch
e3b53fd295 Add UI setting for upcasting attention to float32
Adds "Upcast cross attention layer to float32" option in Stable Diffusion settings. This allows for generating images using SD 2.1 models without --no-half or xFormers.

In order to make upcasting cross attention layer optimizations possible it is necessary to indent several sections of code in sd_hijack_optimizations.py so that a context manager can be used to disable autocast. Also, even though Stable Diffusion (and Diffusers) only upcast q and k, unfortunately my findings were that most of the cross attention layer optimizations could not function unless v is upcast also.
2023-01-25 01:13:04 -05:00
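A rough illustration of the q/k/v upcast this message talks about: compute the attention matmuls in float32 with autocast disabled, then cast back. The real optimizations in sd_hijack_optimizations.py are considerably more involved; this is only a sketch and assumes a device type that torch.autocast accepts.

```python
import torch


def attention_fp32(q, k, v):
    # run the attention matmuls in float32 even if q/k/v arrive as float16;
    # autocast is disabled so the upcast is not silently undone
    with torch.autocast(q.device.type, enabled=False):
        q32, k32, v32 = q.float(), k.float(), v.float()
        scores = torch.softmax(q32 @ k32.transpose(-2, -1) / q32.shape[-1] ** 0.5, dim=-1)
        out = scores @ v32
    return out.to(q.dtype)
```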
brkirch
84d9ce30cb Add option for float32 sampling with float16 UNet
This also handles type casting so that ROCm and MPS torch devices work correctly without --no-half. One cast is required for deepbooru in deepbooru_model.py, some explicit casting is required for img2img and inpainting. depth_model can't be converted to float16 or it won't work correctly on some systems (it's known to have issues on MPS) so in sd_models.py model.depth_model is removed for model.half().
2023-01-25 01:13:02 -05:00
EllangoK
ec8774729e swaps xyz axes internally if one costs more 2023-01-24 02:53:35 -05:00
EllangoK
e46bfa5a9e handling sub grids and merging into one 2023-01-24 02:24:32 -05:00
EllangoK
9fc354e130 implements most of xyz grid script 2023-01-24 02:22:40 -05:00
EllangoK
d30ac02f28 renamed xy to xyz grid
this is mostly just so git can detect it properly
2023-01-24 02:21:32 -05:00
brkirch
f64af77adc Fix different first gen with Approx NN previews
The loading of the model for approx nn live previews can change the internal state of PyTorch, resulting in a different image. This can be avoided by preloading the approx nn model in advance.
2023-01-23 22:49:20 -05:00
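A hedged sketch of the preloading idea: load the approx NN decoder once up front so the first generation runs with the same PyTorch state as later ones. The loader function is passed in because the real loading code is not shown here.

```python
_approx_nn_model = None


def preload_approx_nn(load_fn):
    """Call once at startup, before the first generation, with the real loader."""
    global _approx_nn_model
    if _approx_nn_model is None:
        # loading the decoder later would perturb PyTorch's internal state
        # and make the first generated image differ from subsequent ones
        _approx_nn_model = load_fn()
    return _approx_nn_model
```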
Mykeehu
82a28bfe35
Fix extra network thumbs label color
Added white color for labels.
2023-01-23 22:36:27 +01:00
105 changed files with 7461 additions and 2324 deletions

View File

@ -37,20 +37,20 @@ body:
id: what-should id: what-should
attributes: attributes:
label: What should have happened? label: What should have happened?
description: tell what you think the normal behavior should be description: Tell what you think the normal behavior should be
validations: validations:
required: true required: true
- type: input - type: input
id: commit id: commit
attributes: attributes:
label: Commit where the problem happens label: Commit where the problem happens
description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit hash** shown in the cmd/terminal when you launch the UI) description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)
validations: validations:
required: true required: true
- type: dropdown - type: dropdown
id: platforms id: platforms
attributes: attributes:
label: What platforms do you use to access UI ? label: What platforms do you use to access the UI ?
multiple: true multiple: true
options: options:
- Windows - Windows
@ -74,10 +74,27 @@ body:
id: cmdargs id: cmdargs
attributes: attributes:
label: Command Line Arguments label: Command Line Arguments
description: Are you using any launching parameters/command line arguments (modified webui-user.py) ? If yes, please write them below description: Are you using any launching parameters/command line arguments (modified webui-user .bat/.sh) ? If yes, please write them below. Write "No" otherwise.
render: Shell render: Shell
validations:
required: true
- type: textarea
id: extensions
attributes:
label: List of extensions
description: Are you using any extensions other than built-ins? If yes, provide a list, you can copy it at "Extensions" tab. Write "No" otherwise.
validations:
required: true
- type: textarea
id: logs
attributes:
label: Console logs
description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after your bug happened. If it's very long, provide a link to pastebin or similar service.
render: Shell
validations:
required: true
- type: textarea - type: textarea
id: misc id: misc
attributes: attributes:
label: Additional information, context and logs label: Additional information
description: Please provide us with any relevant additional info, context or log output. description: Please provide us with any relevant additional info or context.

View File

@ -18,7 +18,7 @@ jobs:
cache-dependency-path: | cache-dependency-path: |
**/requirements*txt **/requirements*txt
- name: Run tests - name: Run tests
run: python launch.py --tests --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test run: python launch.py --tests test --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test
- name: Upload main app stdout-stderr - name: Upload main app stdout-stderr
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v3
if: always() if: always()

View File

@ -13,11 +13,11 @@ A browser interface based on Gradio library for Stable Diffusion.
- Prompt Matrix - Prompt Matrix
- Stable Diffusion Upscale - Stable Diffusion Upscale
- Attention, specify parts of text that the model should pay more attention to - Attention, specify parts of text that the model should pay more attention to
- a man in a ((tuxedo)) - will pay more attention to tuxedo - a man in a `((tuxedo))` - will pay more attention to tuxedo
- a man in a (tuxedo:1.21) - alternative syntax - a man in a `(tuxedo:1.21)` - alternative syntax
- select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text (code contributed by anonymous user) - select text and press `Ctrl+Up` or `Ctrl+Down` to automatically adjust attention to selected text (code contributed by anonymous user)
- Loopback, run img2img processing multiple times - Loopback, run img2img processing multiple times
- X/Y plot, a way to draw a 2 dimensional plot of images with different parameters - X/Y/Z plot, a way to draw a 3 dimensional plot of images with different parameters
- Textual Inversion - Textual Inversion
- have as many embeddings as you want and use any names you like for them - have as many embeddings as you want and use any names you like for them
- use multiple embeddings with different numbers of vectors per token - use multiple embeddings with different numbers of vectors per token
@ -28,7 +28,7 @@ A browser interface based on Gradio library for Stable Diffusion.
- CodeFormer, face restoration tool as an alternative to GFPGAN - CodeFormer, face restoration tool as an alternative to GFPGAN
- RealESRGAN, neural network upscaler - RealESRGAN, neural network upscaler
- ESRGAN, neural network upscaler with a lot of third party models - ESRGAN, neural network upscaler with a lot of third party models
- SwinIR and Swin2SR([see here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/2092)), neural network upscalers - SwinIR and Swin2SR ([see here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/2092)), neural network upscalers
- LDSR, Latent diffusion super resolution upscaling - LDSR, Latent diffusion super resolution upscaling
- Resizing aspect ratio options - Resizing aspect ratio options
- Sampling method selection - Sampling method selection
@ -46,7 +46,7 @@ A browser interface based on Gradio library for Stable Diffusion.
- drag and drop an image/text-parameters to promptbox - drag and drop an image/text-parameters to promptbox
- Read Generation Parameters Button, loads parameters in promptbox to UI - Read Generation Parameters Button, loads parameters in promptbox to UI
- Settings page - Settings page
- Running arbitrary python code from UI (must run with --allow-code to enable) - Running arbitrary python code from UI (must run with `--allow-code` to enable)
- Mouseover hints for most UI elements - Mouseover hints for most UI elements
- Possible to change defaults/mix/max/step values for UI elements via text config - Possible to change defaults/mix/max/step values for UI elements via text config
- Tiling support, a checkbox to create images that can be tiled like textures - Tiling support, a checkbox to create images that can be tiled like textures
@ -69,7 +69,7 @@ A browser interface based on Gradio library for Stable Diffusion.
- also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2` - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
- No token limit for prompts (original stable diffusion lets you use up to 75 tokens) - No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
- DeepDanbooru integration, creates danbooru style tags for anime prompts - DeepDanbooru integration, creates danbooru style tags for anime prompts
- [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add --xformers to commandline args) - [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add `--xformers` to commandline args)
- via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI - via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI
- Generate forever option - Generate forever option
- Training tab - Training tab
@ -78,11 +78,11 @@ A browser interface based on Gradio library for Stable Diffusion.
- Clip skip - Clip skip
- Hypernetworks - Hypernetworks
- Loras (same as Hypernetworks but more pretty) - Loras (same as Hypernetworks but more pretty)
- A sparate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt. - A sparate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
- Can select to load a different VAE from settings screen - Can select to load a different VAE from settings screen
- Estimated completion time in progress bar - Estimated completion time in progress bar
- API - API
- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML. - Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML
- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients)) - via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions - [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions
- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions - [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
@ -91,7 +91,6 @@ A browser interface based on Gradio library for Stable Diffusion.
- Eased resolution restriction: generated image's domension must be a multiple of 8 rather than 64 - Eased resolution restriction: generated image's domension must be a multiple of 8 rather than 64
- Now with a license! - Now with a license!
- Reorder elements in the UI from settings screen - Reorder elements in the UI from settings screen
-
## Installation and Running ## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs. Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
@ -101,11 +100,10 @@ Alternatively, use online services (like Google Colab):
- [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)
### Automatic Installation on Windows ### Automatic Installation on Windows
1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH" 1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH".
2. Install [git](https://git-scm.com/download/win). 2. Install [git](https://git-scm.com/download/win).
3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`.
4. Place stable diffusion checkpoint (`model.ckpt`) in the `models/Stable-diffusion` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it). 4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.
5. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user.
### Automatic Installation on Linux ### Automatic Installation on Linux
1. Install the dependencies: 1. Install the dependencies:
@ -121,7 +119,7 @@ sudo pacman -S wget git python3
```bash ```bash
bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh)
``` ```
3. Run `webui.sh`.
### Installation on Apple Silicon ### Installation on Apple Silicon
Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon). Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon).
@ -155,6 +153,9 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al
- Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch - Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch
- xformers - https://github.com/facebookresearch/xformers - xformers - https://github.com/facebookresearch/xformers
- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru - DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
- Sampling in float32 precision from a float16 UNet - marunine for the idea, Birch-san for the example Diffusers implementation (https://github.com/Birch-san/diffusers-play/tree/92feee6)
- Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. Efros (no star) - https://github.com/timothybrooks/instruct-pix2pix
- Security advice - RyotaK - Security advice - RyotaK
- UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
- (You) - (You)

View File

@ -0,0 +1,98 @@
# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
# See more details in LICENSE.
model:
base_learning_rate: 1.0e-04
target: modules.models.diffusion.ddpm_edit.LatentDiffusion
params:
linear_start: 0.00085
linear_end: 0.0120
num_timesteps_cond: 1
log_every_t: 200
timesteps: 1000
first_stage_key: edited
cond_stage_key: edit
# image_size: 64
# image_size: 32
image_size: 16
channels: 4
cond_stage_trainable: false # Note: different from the one we trained before
conditioning_key: hybrid
monitor: val/loss_simple_ema
scale_factor: 0.18215
use_ema: false
scheduler_config: # 10000 warmup steps
target: ldm.lr_scheduler.LambdaLinearScheduler
params:
warm_up_steps: [ 0 ]
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
f_start: [ 1.e-6 ]
f_max: [ 1. ]
f_min: [ 1. ]
unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel
params:
image_size: 32 # unused
in_channels: 8
out_channels: 4
model_channels: 320
attention_resolutions: [ 4, 2, 1 ]
num_res_blocks: 2
channel_mult: [ 1, 2, 4, 4 ]
num_heads: 8
use_spatial_transformer: True
transformer_depth: 1
context_dim: 768
use_checkpoint: True
legacy: False
first_stage_config:
target: ldm.models.autoencoder.AutoencoderKL
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
double_z: true
z_channels: 4
resolution: 256
in_channels: 3
out_ch: 3
ch: 128
ch_mult:
- 1
- 2
- 4
- 4
num_res_blocks: 2
attn_resolutions: []
dropout: 0.0
lossconfig:
target: torch.nn.Identity
cond_stage_config:
target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
data:
target: main.DataModuleFromConfig
params:
batch_size: 128
num_workers: 1
wrap: false
validation:
target: edit_dataset.EditDataset
params:
path: data/clip-filtered-dataset
cache_dir: data/
cache_name: data_10k
split: val
min_text_sim: 0.2
min_image_sim: 0.75
min_direction_sim: 0.2
max_samples_per_prompt: 1
min_resize_res: 512
max_resize_res: 512
crop_res: 512
output_as_edit: False
real_input: True

View File

@ -1,8 +1,7 @@
model: model:
base_learning_rate: 1.0e-4 base_learning_rate: 7.5e-05
target: ldm.models.diffusion.ddpm.LatentDiffusion target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
params: params:
parameterization: "v"
linear_start: 0.00085 linear_start: 0.00085
linear_end: 0.0120 linear_end: 0.0120
num_timesteps_cond: 1 num_timesteps_cond: 1
@ -12,29 +11,36 @@ model:
cond_stage_key: "txt" cond_stage_key: "txt"
image_size: 64 image_size: 64
channels: 4 channels: 4
cond_stage_trainable: false cond_stage_trainable: false # Note: different from the one we trained before
conditioning_key: crossattn conditioning_key: hybrid # important
monitor: val/loss_simple_ema monitor: val/loss_simple_ema
scale_factor: 0.18215 scale_factor: 0.18215
use_ema: False # we set this to false because this is an inference only config finetune_keys: null
scheduler_config: # 10000 warmup steps
target: ldm.lr_scheduler.LambdaLinearScheduler
params:
warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
f_start: [ 1.e-6 ]
f_max: [ 1. ]
f_min: [ 1. ]
unet_config: unet_config:
target: ldm.modules.diffusionmodules.openaimodel.UNetModel target: ldm.modules.diffusionmodules.openaimodel.UNetModel
params: params:
use_checkpoint: True
use_fp16: True
image_size: 32 # unused image_size: 32 # unused
in_channels: 4 in_channels: 9 # 4 data + 4 downscaled image + 1 mask
out_channels: 4 out_channels: 4
model_channels: 320 model_channels: 320
attention_resolutions: [ 4, 2, 1 ] attention_resolutions: [ 4, 2, 1 ]
num_res_blocks: 2 num_res_blocks: 2
channel_mult: [ 1, 2, 4, 4 ] channel_mult: [ 1, 2, 4, 4 ]
num_head_channels: 64 # need to fix for flash-attn num_heads: 8
use_spatial_transformer: True use_spatial_transformer: True
use_linear_in_transformer: True
transformer_depth: 1 transformer_depth: 1
context_dim: 1024 context_dim: 768
use_checkpoint: True
legacy: False legacy: False
first_stage_config: first_stage_config:
@ -43,7 +49,6 @@ model:
embed_dim: 4 embed_dim: 4
monitor: val/rec_loss monitor: val/rec_loss
ddconfig: ddconfig:
#attn_type: "vanilla-xformers"
double_z: true double_z: true
z_channels: 4 z_channels: 4
resolution: 256 resolution: 256
@ -62,7 +67,4 @@ model:
target: torch.nn.Identity target: torch.nn.Identity
cond_stage_config: cond_stage_config:
target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
params:
freeze: True
layer: "penultimate"

View File

@ -1,4 +1,4 @@
from modules import extra_networks from modules import extra_networks, shared
import lora import lora
class ExtraNetworkLora(extra_networks.ExtraNetwork): class ExtraNetworkLora(extra_networks.ExtraNetwork):
@ -6,6 +6,12 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
super().__init__('lora') super().__init__('lora')
def activate(self, p, params_list): def activate(self, p, params_list):
additional = shared.opts.sd_lora
if additional != "" and additional in lora.available_loras and len([x for x in params_list if x.items[0] == additional]) == 0:
p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
names = [] names = []
multipliers = [] multipliers = []
for params in params_list: for params in params_list:

View File

@ -2,18 +2,34 @@ import glob
import os import os
import re import re
import torch import torch
from typing import Union
from modules import shared, devices, sd_models from modules import shared, devices, sd_models, errors
metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
re_digits = re.compile(r"\d+") re_digits = re.compile(r"\d+")
re_unet_down_blocks = re.compile(r"lora_unet_down_blocks_(\d+)_attentions_(\d+)_(.+)") re_x_proj = re.compile(r"(.*)_([qkv]_proj)$")
re_unet_mid_blocks = re.compile(r"lora_unet_mid_block_attentions_(\d+)_(.+)") re_compiled = {}
re_unet_up_blocks = re.compile(r"lora_unet_up_blocks_(\d+)_attentions_(\d+)_(.+)")
re_text_block = re.compile(r"lora_te_text_model_encoder_layers_(\d+)_(.+)") suffix_conversion = {
"attentions": {},
"resnets": {
"conv1": "in_layers_2",
"conv2": "out_layers_3",
"time_emb_proj": "emb_layers_1",
"conv_shortcut": "skip_connection",
}
}
def convert_diffusers_name_to_compvis(key): def convert_diffusers_name_to_compvis(key, is_sd2):
def match(match_list, regex): def match(match_list, regex_text):
regex = re_compiled.get(regex_text)
if regex is None:
regex = re.compile(regex_text)
re_compiled[regex_text] = regex
r = re.match(regex, key) r = re.match(regex, key)
if not r: if not r:
return False return False
@ -24,16 +40,33 @@ def convert_diffusers_name_to_compvis(key):
m = [] m = []
if match(m, re_unet_down_blocks): if match(m, r"lora_unet_down_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[1]}_1_{m[2]}" suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
if match(m, re_unet_mid_blocks): if match(m, r"lora_unet_mid_block_(attentions|resnets)_(\d+)_(.+)"):
return f"diffusion_model_middle_block_1_{m[1]}" suffix = suffix_conversion.get(m[0], {}).get(m[2], m[2])
return f"diffusion_model_middle_block_{1 if m[0] == 'attentions' else m[1] * 2}_{suffix}"
if match(m, re_unet_up_blocks): if match(m, r"lora_unet_up_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
return f"diffusion_model_output_blocks_{m[0] * 3 + m[1]}_1_{m[2]}" suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
return f"diffusion_model_output_blocks_{m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
if match(m, r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv"):
return f"diffusion_model_input_blocks_{3 + m[0] * 3}_0_op"
if match(m, r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv"):
return f"diffusion_model_output_blocks_{2 + m[0] * 3}_{2 if m[0]>0 else 1}_conv"
if match(m, r"lora_te_text_model_encoder_layers_(\d+)_(.+)"):
if is_sd2:
if 'mlp_fc1' in m[1]:
return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
elif 'mlp_fc2' in m[1]:
return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
else:
return f"model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"
if match(m, re_text_block):
return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}" return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"
return key return key
@ -43,6 +76,23 @@ class LoraOnDisk:
def __init__(self, name, filename): def __init__(self, name, filename):
self.name = name self.name = name
self.filename = filename self.filename = filename
self.metadata = {}
_, ext = os.path.splitext(filename)
if ext.lower() == ".safetensors":
try:
self.metadata = sd_models.read_metadata_from_safetensors(filename)
except Exception as e:
errors.display(e, f"reading lora {filename}")
if self.metadata:
m = {}
for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
m[k] = v
self.metadata = m
self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None) # those are cover images and they are too big to display in UI as text
class LoraModule: class LoraModule:
@ -82,15 +132,22 @@ def load_lora(name, filename):
sd = sd_models.read_state_dict(filename) sd = sd_models.read_state_dict(filename)
keys_failed_to_match = [] keys_failed_to_match = {}
is_sd2 = 'model_transformer_resblocks' in shared.sd_model.lora_layer_mapping
for key_diffusers, weight in sd.items(): for key_diffusers, weight in sd.items():
fullkey = convert_diffusers_name_to_compvis(key_diffusers) key_diffusers_without_lora_parts, lora_key = key_diffusers.split(".", 1)
key, lora_key = fullkey.split(".", 1) key = convert_diffusers_name_to_compvis(key_diffusers_without_lora_parts, is_sd2)
sd_module = shared.sd_model.lora_layer_mapping.get(key, None) sd_module = shared.sd_model.lora_layer_mapping.get(key, None)
if sd_module is None: if sd_module is None:
keys_failed_to_match.append(key_diffusers) m = re_x_proj.match(key)
if m:
sd_module = shared.sd_model.lora_layer_mapping.get(m.group(1), None)
if sd_module is None:
keys_failed_to_match[key_diffusers] = key
continue continue
lora_module = lora.modules.get(key, None) lora_module = lora.modules.get(key, None)
@ -104,15 +161,21 @@ def load_lora(name, filename):
if type(sd_module) == torch.nn.Linear: if type(sd_module) == torch.nn.Linear:
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False) module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
elif type(sd_module) == torch.nn.modules.linear.NonDynamicallyQuantizableLinear:
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
elif type(sd_module) == torch.nn.MultiheadAttention:
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
elif type(sd_module) == torch.nn.Conv2d: elif type(sd_module) == torch.nn.Conv2d:
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False) module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
else: else:
print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
continue
assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}' assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
with torch.no_grad(): with torch.no_grad():
module.weight.copy_(weight) module.weight.copy_(weight)
module.to(device=devices.device, dtype=devices.dtype) module.to(device=devices.cpu, dtype=devices.dtype)
if lora_key == "lora_up.weight": if lora_key == "lora_up.weight":
lora_module.up = module lora_module.up = module
@ -158,25 +221,120 @@ def load_loras(names, multipliers=None):
loaded_loras.append(lora) loaded_loras.append(lora)
def lora_forward(module, input, res): def lora_calc_updown(lora, module, target):
if len(loaded_loras) == 0: with torch.no_grad():
return res up = module.up.weight.to(target.device, dtype=target.dtype)
down = module.down.weight.to(target.device, dtype=target.dtype)
lora_layer_name = getattr(module, 'lora_layer_name', None) if up.shape[2:] == (1, 1) and down.shape[2:] == (1, 1):
for lora in loaded_loras: updown = (up.squeeze(2).squeeze(2) @ down.squeeze(2).squeeze(2)).unsqueeze(2).unsqueeze(3)
module = lora.modules.get(lora_layer_name, None) else:
if module is not None: updown = up @ down
res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
return res updown = updown * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
return updown
def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
"""
Applies the currently selected set of Loras to the weights of torch layer self.
If weights already have this particular set of loras applied, does nothing.
If not, restores orginal weights from backup and alters weights according to loras.
"""
lora_layer_name = getattr(self, 'lora_layer_name', None)
if lora_layer_name is None:
return
current_names = getattr(self, "lora_current_names", ())
wanted_names = tuple((x.name, x.multiplier) for x in loaded_loras)
weights_backup = getattr(self, "lora_weights_backup", None)
if weights_backup is None:
if isinstance(self, torch.nn.MultiheadAttention):
weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
else:
weights_backup = self.weight.to(devices.cpu, copy=True)
self.lora_weights_backup = weights_backup
if current_names != wanted_names:
if weights_backup is not None:
if isinstance(self, torch.nn.MultiheadAttention):
self.in_proj_weight.copy_(weights_backup[0])
self.out_proj.weight.copy_(weights_backup[1])
else:
self.weight.copy_(weights_backup)
for lora in loaded_loras:
module = lora.modules.get(lora_layer_name, None)
if module is not None and hasattr(self, 'weight'):
self.weight += lora_calc_updown(lora, module, self.weight)
continue
module_q = lora.modules.get(lora_layer_name + "_q_proj", None)
module_k = lora.modules.get(lora_layer_name + "_k_proj", None)
module_v = lora.modules.get(lora_layer_name + "_v_proj", None)
module_out = lora.modules.get(lora_layer_name + "_out_proj", None)
if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
updown_q = lora_calc_updown(lora, module_q, self.in_proj_weight)
updown_k = lora_calc_updown(lora, module_k, self.in_proj_weight)
updown_v = lora_calc_updown(lora, module_v, self.in_proj_weight)
updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
self.in_proj_weight += updown_qkv
self.out_proj.weight += lora_calc_updown(lora, module_out, self.out_proj.weight)
continue
if module is None:
continue
print(f'failed to calculate lora weights for layer {lora_layer_name}')
setattr(self, "lora_current_names", wanted_names)
def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
setattr(self, "lora_current_names", ())
setattr(self, "lora_weights_backup", None)
def lora_Linear_forward(self, input): def lora_Linear_forward(self, input):
return lora_forward(self, input, torch.nn.Linear_forward_before_lora(self, input)) lora_apply_weights(self)
return torch.nn.Linear_forward_before_lora(self, input)
def lora_Linear_load_state_dict(self, *args, **kwargs):
lora_reset_cached_weight(self)
return torch.nn.Linear_load_state_dict_before_lora(self, *args, **kwargs)
def lora_Conv2d_forward(self, input): def lora_Conv2d_forward(self, input):
return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora(self, input)) lora_apply_weights(self)
return torch.nn.Conv2d_forward_before_lora(self, input)
def lora_Conv2d_load_state_dict(self, *args, **kwargs):
lora_reset_cached_weight(self)
return torch.nn.Conv2d_load_state_dict_before_lora(self, *args, **kwargs)
def lora_MultiheadAttention_forward(self, *args, **kwargs):
lora_apply_weights(self)
return torch.nn.MultiheadAttention_forward_before_lora(self, *args, **kwargs)
def lora_MultiheadAttention_load_state_dict(self, *args, **kwargs):
lora_reset_cached_weight(self)
return torch.nn.MultiheadAttention_load_state_dict_before_lora(self, *args, **kwargs)
def list_available_loras(): def list_available_loras():
@@ -189,7 +347,7 @@ def list_available_loras():
        glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.safetensors'), recursive=True) + \
        glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.ckpt'), recursive=True)

-    for filename in sorted(candidates):
+    for filename in sorted(candidates, key=str.lower):
        if os.path.isdir(filename):
            continue
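For readers following the new weight-patching path above, here is a minimal standalone sketch of the same idea (back up the original weight once, then restore it and add a scaled low-rank delta). The layer, shapes and multiplier are made up for illustration; only torch is assumed.

import torch

layer = torch.nn.Linear(8, 8)

# one-time backup of the original weight, like lora_weights_backup above
weights_backup = layer.weight.to("cpu", copy=True)

# a made-up low-rank update: (up @ down) has the same shape as layer.weight
down = torch.randn(2, 8)
up = torch.randn(8, 2)
multiplier = 0.8

with torch.no_grad():
    # restore the untouched weight, then add the scaled delta in place
    layer.weight.copy_(weights_backup)
    layer.weight += (up @ down) * multiplier

Baking the delta into the stored weight means the extra per-call matmul of the old lora_forward wrapper disappears from every forward pass; the backup is what makes switching to a different set of loras reversible.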

View File

@@ -1,14 +1,19 @@
import torch
+import gradio as gr

import lora
import extra_networks_lora
import ui_extra_networks_lora
-from modules import script_callbacks, ui_extra_networks, extra_networks
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared


def unload():
    torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
+    torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_lora
    torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_lora
+    torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_lora
+    torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_lora
+    torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_lora


def before_ui():
@@ -19,12 +24,33 @@ def before_ui():
if not hasattr(torch.nn, 'Linear_forward_before_lora'):
    torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward

+if not hasattr(torch.nn, 'Linear_load_state_dict_before_lora'):
+    torch.nn.Linear_load_state_dict_before_lora = torch.nn.Linear._load_from_state_dict
+
if not hasattr(torch.nn, 'Conv2d_forward_before_lora'):
    torch.nn.Conv2d_forward_before_lora = torch.nn.Conv2d.forward

+if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_lora'):
+    torch.nn.Conv2d_load_state_dict_before_lora = torch.nn.Conv2d._load_from_state_dict
+
+if not hasattr(torch.nn, 'MultiheadAttention_forward_before_lora'):
+    torch.nn.MultiheadAttention_forward_before_lora = torch.nn.MultiheadAttention.forward
+
+if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_lora'):
+    torch.nn.MultiheadAttention_load_state_dict_before_lora = torch.nn.MultiheadAttention._load_from_state_dict
+
torch.nn.Linear.forward = lora.lora_Linear_forward
+torch.nn.Linear._load_from_state_dict = lora.lora_Linear_load_state_dict
torch.nn.Conv2d.forward = lora.lora_Conv2d_forward
+torch.nn.Conv2d._load_from_state_dict = lora.lora_Conv2d_load_state_dict
+torch.nn.MultiheadAttention.forward = lora.lora_MultiheadAttention_forward
+torch.nn.MultiheadAttention._load_from_state_dict = lora.lora_MultiheadAttention_load_state_dict

script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
script_callbacks.on_script_unloaded(unload)
script_callbacks.on_before_ui(before_ui)
+
+
+shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
+    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": [""] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
+}))
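The hooks above are installed with a save-and-wrap pattern: the original callable is stashed on the torch.nn module once, the wrapper is swapped in, and unload() puts the original back. A minimal sketch, assuming only torch.nn.Linear and ignoring the webui-specific callbacks:

import torch

# keep the original callable once, exactly like the *_before_lora attributes above
if not hasattr(torch.nn, 'Linear_forward_before_lora'):
    torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward

def patched_forward(self, input):
    # the webui hook would patch weights here before delegating to the original forward
    return torch.nn.Linear_forward_before_lora(self, input)

torch.nn.Linear.forward = patched_forward

# sanity check: the patched layer still works
print(torch.nn.Linear(4, 2)(torch.zeros(1, 4)).shape)

# what unload() does: put the original back
torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora

The hasattr guard matters when the script is reloaded: without it a second reload would overwrite the saved original with an already-patched function.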

View File

@@ -15,20 +15,15 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
    def list_items(self):
        for name, lora_on_disk in lora.available_loras.items():
            path, ext = os.path.splitext(lora_on_disk.filename)

-            previews = [path + ".png", path + ".preview.png"]
-
-            preview = None
-            for file in previews:
-                if os.path.isfile(file):
-                    preview = "./file=" + file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(file))
-                    break
-
            yield {
                "name": name,
                "filename": path,
-                "preview": preview,
+                "preview": self.find_preview(path),
+                "description": self.find_description(path),
+                "search_term": self.search_terms_from_path(lora_on_disk.filename),
                "prompt": json.dumps(f"<lora:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
-                "local_preview": path + ".png",
+                "local_preview": f"{path}.{shared.opts.samples_format}",
+                "metadata": json.dumps(lora_on_disk.metadata, indent=4) if lora_on_disk.metadata else None,
            }

    def allowed_directories_for_previews(self):
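The inline preview lookup was replaced by the shared self.find_preview helper; a hypothetical sketch of that kind of lookup, reconstructed from the removed lines above (the real helper in modules/ui_extra_networks.py may probe more extensions):

import os

def find_preview_sketch(path):
    # candidate names taken from the removed code above
    for candidate in (path + ".png", path + ".preview.png"):
        if os.path.isfile(candidate):
            mtime = os.path.getmtime(candidate)
            return "./file=" + candidate.replace('\\', '/') + "?mtime=" + str(mtime)
    return None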

View File

@@ -89,22 +89,15 @@ function checkBrackets(evt, textArea, counterElt) {

function setupBracketChecking(id_prompt, id_counter){
    var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
    var counter = gradioApp().getElementById(id_counter)

    textarea.addEventListener("input", function(evt){
        checkBrackets(evt, textarea, counter)
    });
}

-var shadowRootLoaded = setInterval(function() {
-    var shadowRoot = document.querySelector('gradio-app').shadowRoot;
-    if(! shadowRoot) return false;
-
-    var shadowTextArea = shadowRoot.querySelectorAll('#txt2img_prompt > label > textarea');
-    if(shadowTextArea.length < 1) return false;
-
-    clearInterval(shadowRootLoaded);
-
+onUiLoaded(function(){
    setupBracketChecking('txt2img_prompt', 'txt2img_token_counter')
    setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter')
-    setupBracketChecking('img2img_prompt', 'imgimg_token_counter')
+    setupBracketChecking('img2img_prompt', 'img2img_token_counter')
    setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter')
-}, 1000);
+})

View File

@@ -1,11 +1,15 @@
-<div class='card' {preview_html} onclick={card_clicked}>
+<div class='card' style={style} onclick={card_clicked}>
+    {metadata_button}
    <div class='actions'>
        <div class='additional'>
            <ul>
                <a href="#" title="replace preview image with currently selected in gallery" onclick={save_card_preview}>replace preview</a>
            </ul>
+            <span style="display:none" class='search_term'>{search_term}</span>
        </div>
        <span class='name'>{name}</span>
+        <span class='description'>{description}</span>
    </div>
</div>

View File

@@ -417,3 +417,248 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
</pre>
<h2><a href="https://github.com/huggingface/diffusers/blob/c7da8fd23359a22d0df2741688b5b4f33c26df21/LICENSE">Scaled Dot Product Attention</a></h2>
<small>Some small amounts of code borrowed and reworked.</small>
<pre>
Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
</pre>
<h2><a href="https://github.com/explosion/curated-transformers/blob/main/LICENSE">Curated transformers</a></h2>
<small>The MPS workaround for nn.Linear on macOS 13.2.X is based on the MPS workaround for nn.Linear created by danieldk for Curated transformers</small>
<pre>
The MIT License (MIT)
Copyright (C) 2021 ExplosionAI GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
</pre>

View File

@@ -12,7 +12,7 @@ function dimensionChange(e, is_width, is_height){
        currentHeight = e.target.value*1.0
    }

-    var inImg2img = Boolean(gradioApp().querySelector("button.rounded-t-lg.border-gray-200"))
+    var inImg2img = gradioApp().querySelector("#tab_img2img").style.display == "block";

    if(!inImg2img){
        return;
@@ -22,7 +22,7 @@ function dimensionChange(e, is_width, is_height){
    var tabIndex = get_tab_index('mode_img2img')
    if(tabIndex == 0){ // img2img
-        targetElement = gradioApp().querySelector('div[data-testid=image] img');
+        targetElement = gradioApp().querySelector('#img2img_image div[data-testid=image] img');
    } else if(tabIndex == 1){ //Sketch
        targetElement = gradioApp().querySelector('#img2img_sketch div[data-testid=image] img');
    } else if(tabIndex == 2){ // Inpaint
@@ -30,7 +30,7 @@ function dimensionChange(e, is_width, is_height){
    } else if(tabIndex == 3){ // Inpaint sketch
        targetElement = gradioApp().querySelector('#inpaint_sketch div[data-testid=image] img');
    }

    if(targetElement){
@@ -38,7 +38,7 @@ function dimensionChange(e, is_width, is_height){
        if(!arPreviewRect){
            arPreviewRect = document.createElement('div')
            arPreviewRect.id = "imageARPreview";
-            gradioApp().getRootNode().appendChild(arPreviewRect)
+            gradioApp().appendChild(arPreviewRect)
        }
@@ -91,23 +91,26 @@ onUiUpdate(function(){
    if(arPreviewRect){
        arPreviewRect.style.display = 'none';
    }
-    var inImg2img = Boolean(gradioApp().querySelector("button.rounded-t-lg.border-gray-200"))
-    if(inImg2img){
+    var tabImg2img = gradioApp().querySelector("#tab_img2img");
+    if (tabImg2img) {
+        var inImg2img = tabImg2img.style.display == "block";
+        if(inImg2img){
            let inputs = gradioApp().querySelectorAll('input');
            inputs.forEach(function(e){
                var is_width = e.parentElement.id == "img2img_width"
                var is_height = e.parentElement.id == "img2img_height"

                if((is_width || is_height) && !e.classList.contains('scrollwatch')){
                    e.addEventListener('input', function(e){dimensionChange(e, is_width, is_height)} )
                    e.classList.add('scrollwatch')
                }
                if(is_width){
                    currentWidth = e.value*1.0
                }
                if(is_height){
                    currentHeight = e.value*1.0
                }
            })
        }
+    }
});

View File

@@ -43,7 +43,7 @@ contextMenuInit = function(){
    })

-    gradioApp().getRootNode().appendChild(contextMenu)
+    gradioApp().appendChild(contextMenu)

    let menuWidth = contextMenu.offsetWidth + 4;
    let menuHeight = contextMenu.offsetHeight + 4;

View File

@@ -1,6 +1,6 @@
function keyupEditAttention(event){
    let target = event.originalTarget || event.composedPath()[0];
-    if (!target.matches("[id*='_toprow'] textarea.gr-text-input[placeholder]")) return;
+    if (! target.matches("[id*='_toprow'] [id*='_prompt'] textarea")) return;
    if (! (event.metaKey || event.ctrlKey)) return;

    let isPlus = event.key == "ArrowUp"

View File

@@ -1,7 +1,8 @@
-function extensions_apply(_, _){
-    disable = []
-    update = []
+function extensions_apply(_, _, disable_all){
+    var disable = []
+    var update = []

    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
        if(x.name.startsWith("enable_") && ! x.checked)
            disable.push(x.name.substr(7))
@@ -12,15 +13,28 @@ function extensions_apply(_, _){
    restart_reload()

-    return [JSON.stringify(disable), JSON.stringify(update)]
+    return [JSON.stringify(disable), JSON.stringify(update), disable_all]
}

-function extensions_check(){
+function extensions_check(_, _){
+    var disable = []
+
+    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
+        if(x.name.startsWith("enable_") && ! x.checked)
+            disable.push(x.name.substr(7))
+    })
+
    gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
        x.innerHTML = "Loading..."
    })

-    return []
+    var id = randomId()
+    requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function(){
+    })
+
+    return [id, JSON.stringify(disable)]
}

function install_extension_from_index(button, url){

View File

@@ -5,18 +5,16 @@ function setupExtraNetworksForTab(tabname){
    var tabs = gradioApp().querySelector('#'+tabname+'_extra_tabs > div')
    var search = gradioApp().querySelector('#'+tabname+'_extra_search textarea')
    var refresh = gradioApp().getElementById(tabname+'_extra_refresh')
-    var close = gradioApp().getElementById(tabname+'_extra_close')

    search.classList.add('search')
    tabs.appendChild(search)
    tabs.appendChild(refresh)
-    tabs.appendChild(close)

    search.addEventListener("input", function(evt){
        searchTerm = search.value.toLowerCase()

        gradioApp().querySelectorAll('#'+tabname+'_extra_tabs div.card').forEach(function(elem){
-            text = elem.querySelector('.name').textContent.toLowerCase()
+            text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase()
            elem.style.display = text.indexOf(searchTerm) == -1 ? "none" : ""
        })
    });
@ -48,10 +46,39 @@ function setupExtraNetworks(){
onUiLoaded(setupExtraNetworks) onUiLoaded(setupExtraNetworks)
var re_extranet = /<([^:]+:[^:]+):[\d\.]+>/;
var re_extranet_g = /\s+<([^:]+:[^:]+):[\d\.]+>/g;
function tryToRemoveExtraNetworkFromPrompt(textarea, text){
var m = text.match(re_extranet)
if(! m) return false
var partToSearch = m[1]
var replaced = false
var newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found, index){
m = found.match(re_extranet);
if(m[1] == partToSearch){
replaced = true;
return ""
}
return found;
})
if(replaced){
textarea.value = newTextareaText
return true;
}
return false
}
function cardClicked(tabname, textToAdd, allowNegativePrompt){
    var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea")

-    textarea.value = textarea.value + " " + textToAdd
+    if(! tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)){
+        textarea.value = textarea.value + opts.extra_networks_add_text_separator + textToAdd
+    }

    updateInput(textarea)
}
@ -67,3 +94,86 @@ function saveCardPreview(event, tabname, filename){
event.stopPropagation() event.stopPropagation()
event.preventDefault() event.preventDefault()
} }
function extraNetworksSearchButton(tabs_id, event){
searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea')
button = event.target
text = button.classList.contains("search-all") ? "" : button.textContent.trim()
searchTextarea.value = text
updateInput(searchTextarea)
}
var globalPopup = null;
var globalPopupInner = null;
function popup(contents){
if(! globalPopup){
globalPopup = document.createElement('div')
globalPopup.onclick = function(){ globalPopup.style.display = "none"; };
globalPopup.classList.add('global-popup');
var close = document.createElement('div')
close.classList.add('global-popup-close');
close.onclick = function(){ globalPopup.style.display = "none"; };
close.title = "Close";
globalPopup.appendChild(close)
globalPopupInner = document.createElement('div')
globalPopupInner.onclick = function(event){ event.stopPropagation(); return false; };
globalPopupInner.classList.add('global-popup-inner');
globalPopup.appendChild(globalPopupInner)
gradioApp().appendChild(globalPopup);
}
globalPopupInner.innerHTML = '';
globalPopupInner.appendChild(contents);
globalPopup.style.display = "flex";
}
function extraNetworksShowMetadata(text){
elem = document.createElement('pre')
elem.classList.add('popup-metadata');
elem.textContent = text;
popup(elem);
}
function requestGet(url, data, handler, errorHandler){
var xhr = new XMLHttpRequest();
var args = Object.keys(data).map(function(k){ return encodeURIComponent(k) + '=' + encodeURIComponent(data[k]) }).join('&')
xhr.open("GET", url + "?" + args, true);
xhr.onreadystatechange = function () {
if (xhr.readyState === 4) {
if (xhr.status === 200) {
try {
var js = JSON.parse(xhr.responseText);
handler(js)
} catch (error) {
console.error(error);
errorHandler()
}
} else{
errorHandler()
}
}
};
var js = JSON.stringify(data);
xhr.send(js);
}
function extraNetworksRequestMetadata(event, extraPage, cardName){
showError = function(){ extraNetworksShowMetadata("there was an error getting metadata"); }
requestGet("./sd_extra_networks/metadata", {"page": extraPage, "item": cardName}, function(data){
if(data && data.metadata){
extraNetworksShowMetadata(data.metadata)
} else{
showError()
}
}, showError)
event.stopPropagation()
}
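The same metadata fetch can be exercised outside the browser; a sketch using Python's requests, where the base URL and the card name are assumptions for a locally running webui and only the endpoint path and parameter names are taken from the javascript above:

import requests

# base URL and card name are placeholders for a local webui instance
resp = requests.get(
    "http://127.0.0.1:7860/sd_extra_networks/metadata",
    params={"page": "lora", "item": "example_lora"},
    timeout=10,
)
data = resp.json()
print(data.get("metadata") or "no metadata for this card")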

View File

@@ -6,10 +6,11 @@ titles = {
    "GFPGAN": "Restore low quality faces using GFPGAN neural network",
    "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps higher than 30-40 does not help",
    "DDIM": "Denoising Diffusion Implicit Models - best at inpainting",
+    "UniPC": "Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models",
    "DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution",

-    "Batch count": "How many batches of images to create",
-    "Batch size": "How many image to create in a single batch",
+    "Batch count": "How many batches of images to create (has no impact on generation performance or VRAM usage)",
+    "Batch size": "How many image to create in a single batch (increases generation performance at cost of higher VRAM usage)",
    "CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results",
    "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result",
    "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time",
@@ -17,11 +18,10 @@ titles = {
    "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.",
    "\u{1f4c2}": "Open images output directory",
    "\u{1f4be}": "Save style",
-    "\U0001F5D1": "Clear prompt",
+    "\u{1f5d1}\ufe0f": "Clear prompt",
    "\u{1f4cb}": "Apply selected styles to current prompt",
    "\u{1f4d2}": "Paste available values into the field",
-    "\u{1f3b4}": "Show extra networks",
+    "\u{1f3b4}": "Show/hide extra networks",

    "Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt",
    "SD upscale": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back",
@@ -39,8 +39,7 @@ titles = {
    "Inpaint at full resolution": "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image",

    "Denoising strength": "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.",
-    "Denoising strength change factor": "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.",

    "Skip": "Stop processing current image and continue processing.",
    "Interrupt": "Stop processing images and return any results accumulated so far.",
    "Save": "Write image to a directory (default - log/images) and generation parameters into csv file.",
@@ -50,7 +49,7 @@ titles = {
    "None": "Do not do anything special",
    "Prompt matrix": "Separate prompts into parts using vertical pipe character (|) and the script will create a picture for every combination of them (except for the first part, which will be present in all combinations)",
-    "X/Y plot": "Create a grid where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows",
+    "X/Y/Z plot": "Create grid(s) where images will have different parameters. Use inputs below to specify which parameters will be shared by columns and rows",
    "Custom code": "Run Python code. Advanced user only. Must run program with --allow-code for this to work",

    "Prompt S/R": "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others",
@@ -66,12 +65,14 @@ titles = {
    "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",

-    "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
-    "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
+    "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
+    "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg],[prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime<Format><Time Zone>], [job_timestamp]; leave empty for default.",
    "Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",

-    "Loopback": "Process an image, use it as an input, repeat.",
-    "Loops": "How many times to repeat processing an image and using it as input for the next iteration",
+    "Loopback": "Performs img2img processing multiple times. Output images are used as input for the next loop.",
+    "Loops": "How many times to process an image. Each output is used as the input of the next loop. If set to 1, behavior will be as if this script were not used.",
+    "Final denoising strength": "The denoising strength for the final loop of each image in the batch.",
+    "Denoising strength curve": "The denoising curve controls the rate of denoising strength change each loop. Aggressive: Most of the change will happen towards the start of the loops. Linear: Change will be constant through all loops. Lazy: Most of the change will happen towards the end of the loops.",

    "Style 1": "Style to apply; styles have components for both positive and negative prompts and apply to both",
    "Style 2": "Style to apply; styles have components for both positive and negative prompts and apply to both",

View File

@@ -11,7 +11,7 @@ function showModal(event) {
    if (modalImage.style.display === 'none') {
        lb.style.setProperty('background-image', 'url(' + source.src + ')');
    }
-    lb.style.display = "block";
+    lb.style.display = "flex";
    lb.focus()

    const tabTxt2Img = gradioApp().getElementById("tab_txt2img")
@@ -32,13 +32,7 @@ function negmod(n, m) {
function updateOnBackgroundChange() {
    const modalImage = gradioApp().getElementById("modalImage")
    if (modalImage && modalImage.offsetParent) {
-        let allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
-        let currentButton = null
-        allcurrentButtons.forEach(function(elem) {
-            if (elem.parentElement.offsetParent) {
-                currentButton = elem;
-            }
-        })
+        let currentButton = selected_gallery_button();

        if (currentButton?.children?.length > 0 && modalImage.src != currentButton.children[0].src) {
            modalImage.src = currentButton.children[0].src;
@@ -50,22 +44,10 @@ function updateOnBackgroundChange() {
    }
}

function modalImageSwitch(offset) {
-    var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
-    var galleryButtons = []
-    allgalleryButtons.forEach(function(elem) {
-        if (elem.parentElement.offsetParent) {
-            galleryButtons.push(elem);
-        }
-    })
+    var galleryButtons = all_gallery_buttons();

    if (galleryButtons.length > 1) {
-        var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
-        var currentButton = null
-        allcurrentButtons.forEach(function(elem) {
-            if (elem.parentElement.offsetParent) {
-                currentButton = elem;
-            }
-        })
+        var currentButton = selected_gallery_button();

        var result = -1
        galleryButtons.forEach(function(v, i) {
@@ -136,37 +118,29 @@ function modalKeyHandler(event) {
    }
}

-function showGalleryImage() {
-    setTimeout(function() {
-        fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain')
-        if (fullImg_preview != null) {
-            fullImg_preview.forEach(function function_name(e) {
-                if (e.dataset.modded)
-                    return;
-                e.dataset.modded = true;
-                if(e && e.parentElement.tagName == 'DIV'){
-                    e.style.cursor='pointer'
-                    e.style.userSelect='none'
-                    var isFirefox = isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1
+function setupImageForLightbox(e) {
+    if (e.dataset.modded)
+        return;
+
+    e.dataset.modded = true;
+    e.style.cursor='pointer'
+    e.style.userSelect='none'
+
+    var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1

    // For Firefox, listening on click first switched to next image then shows the lightbox.
    // If you know how to fix this without switching to mousedown event, please.
    // For other browsers the event is click to make it possiblr to drag picture.
    var event = isFirefox ? 'mousedown' : 'click'

    e.addEventListener(event, function (evt) {
        if(!opts.js_modal_lightbox || evt.button != 0) return;
-        modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
-        evt.preventDefault()
-        showModal(evt)
-    }, true);
-            });
-        }
-    }, 100);
+
+        modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
+        evt.preventDefault()
+        showModal(evt)
+    }, true);
}
function modalZoomSet(modalImage, enable) { function modalZoomSet(modalImage, enable) {
@@ -199,21 +173,21 @@ function modalTileImageToggle(event) {
}

function galleryImageHandler(e) {
-    if (e && e.parentElement.tagName == 'BUTTON') {
+    //if (e && e.parentElement.tagName == 'BUTTON') {
        e.onclick = showGalleryImage;
-    }
+    //}
}

onUiUpdate(function() {
-    fullImg_preview = gradioApp().querySelectorAll('img.w-full')
+    fullImg_preview = gradioApp().querySelectorAll('.gradio-gallery > div > img')
    if (fullImg_preview != null) {
-        fullImg_preview.forEach(galleryImageHandler);
+        fullImg_preview.forEach(setupImageForLightbox);
    }
    updateOnBackgroundChange();
})

document.addEventListener("DOMContentLoaded", function() {
-    const modalFragment = document.createDocumentFragment();
+    //const modalFragment = document.createDocumentFragment();
    const modal = document.createElement('div')
    modal.onclick = closeModal;
    modal.id = "lightboxModal";
@@ -277,9 +251,9 @@ document.addEventListener("DOMContentLoaded", function() {
    modal.appendChild(modalNext)

-    gradioApp().getRootNode().appendChild(modal)
-    document.body.appendChild(modalFragment);
+    gradioApp().appendChild(modal)
+    document.body.appendChild(modal);
});

View File

@@ -15,7 +15,7 @@ onUiUpdate(function(){
        }
    }

-    const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"][style*="display: block"] img.h-full.w-full.overflow-hidden');
+    const galleryPreviews = gradioApp().querySelectorAll('div[id^="tab_"][style*="display: block"] div[id$="_results"] .thumbnail-item > img');

    if (galleryPreviews == null) return;

View File

@@ -1,78 +1,13 @@
// code related to showing and updating progressbar shown as the image is being made

-galleries = {}
-storedGallerySelections = {}
-galleryObservers = {}

function rememberGallerySelection(id_gallery){
-    storedGallerySelections[id_gallery] = getGallerySelectedIndex(id_gallery)
}

function getGallerySelectedIndex(id_gallery){
-    let galleryButtons = gradioApp().querySelectorAll('#'+id_gallery+' .gallery-item')
-    let galleryBtnSelected = gradioApp().querySelector('#'+id_gallery+' .gallery-item.\\!ring-2')
-
-    let currentlySelectedIndex = -1
-    galleryButtons.forEach(function(v, i){ if(v==galleryBtnSelected) { currentlySelectedIndex = i } })
-
-    return currentlySelectedIndex
}
// this is a workaround for https://github.com/gradio-app/gradio/issues/2984
function check_gallery(id_gallery){
let gallery = gradioApp().getElementById(id_gallery)
// if gallery has no change, no need to setting up observer again.
if (gallery && galleries[id_gallery] !== gallery){
galleries[id_gallery] = gallery;
if(galleryObservers[id_gallery]){
galleryObservers[id_gallery].disconnect();
}
storedGallerySelections[id_gallery] = -1
galleryObservers[id_gallery] = new MutationObserver(function (){
let galleryButtons = gradioApp().querySelectorAll('#'+id_gallery+' .gallery-item')
let galleryBtnSelected = gradioApp().querySelector('#'+id_gallery+' .gallery-item.\\!ring-2')
let currentlySelectedIndex = getGallerySelectedIndex(id_gallery)
prevSelectedIndex = storedGallerySelections[id_gallery]
storedGallerySelections[id_gallery] = -1
if (prevSelectedIndex !== -1 && galleryButtons.length>prevSelectedIndex && !galleryBtnSelected) {
// automatically re-open previously selected index (if exists)
activeElement = gradioApp().activeElement;
let scrollX = window.scrollX;
let scrollY = window.scrollY;
galleryButtons[prevSelectedIndex].click();
showGalleryImage();
// When the gallery button is clicked, it gains focus and scrolls itself into view
// We need to scroll back to the previous position
setTimeout(function (){
window.scrollTo(scrollX, scrollY);
}, 50);
if(activeElement){
// i fought this for about an hour; i don't know why the focus is lost or why this helps recover it
// if someone has a better solution please by all means
setTimeout(function (){
activeElement.focus({
preventScroll: true // Refocus the element that was focused before the gallery was opened without scrolling to it
})
}, 1);
}
}
})
galleryObservers[id_gallery].observe( gallery, { childList:true, subtree:false })
}
}
onUiUpdate(function(){
check_gallery('txt2img_gallery')
check_gallery('img2img_gallery')
})
function request(url, data, handler, errorHandler){
    var xhr = new XMLHttpRequest();
    var url = url;
@@ -139,7 +74,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre
    var divProgress = document.createElement('div')
    divProgress.className='progressDiv'
-    divProgress.style.display = opts.show_progressbar ? "" : "none"
+    divProgress.style.display = opts.show_progressbar ? "block" : "none"
    var divInner = document.createElement('div')
    divInner.className='progress'

View File

@ -7,9 +7,31 @@ function set_theme(theme){
} }
} }
function all_gallery_buttons() {
var allGalleryButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnails > .thumbnail-item.thumbnail-small');
var visibleGalleryButtons = [];
allGalleryButtons.forEach(function(elem) {
if (elem.parentElement.offsetParent) {
visibleGalleryButtons.push(elem);
}
})
return visibleGalleryButtons;
}
function selected_gallery_button() {
var allCurrentButtons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery].gradio-gallery .thumbnail-item.thumbnail-small.selected');
var visibleCurrentButton = null;
allCurrentButtons.forEach(function(elem) {
if (elem.parentElement.offsetParent) {
visibleCurrentButton = elem;
}
})
return visibleCurrentButton;
}
function selected_gallery_index(){
-    var buttons = gradioApp().querySelectorAll('[style="display: block;"].tabitem div[id$=_gallery] .gallery-item')
-    var button = gradioApp().querySelector('[style="display: block;"].tabitem div[id$=_gallery] .gallery-item.\\!ring-2')
+    var buttons = all_gallery_buttons();
+    var button = selected_gallery_button();

    var result = -1
    buttons.forEach(function(v, i){ if(v==button) { result = i } })
@@ -18,14 +40,18 @@ function selected_gallery_index(){
}

function extract_image_from_gallery(gallery){
-    if(gallery.length == 1){
-        return [gallery[0]]
+    if (gallery.length == 0){
+        return [null];
+    }
+
+    if (gallery.length == 1){
+        return [gallery[0]];
    }

    index = selected_gallery_index()

    if (index < 0 || index >= gallery.length){
-        return [null]
+        // Use the first image in the gallery as the default
+        index = 0;
    }

    return [gallery[index]];
@@ -86,7 +112,7 @@ function get_tab_index(tabId){
    var res = 0

    gradioApp().getElementById(tabId).querySelector('div').querySelectorAll('button').forEach(function(button, i){
-        if(button.className.indexOf('bg-white') != -1)
+        if(button.className.indexOf('selected') != -1)
            res = i
    })
@ -191,6 +217,28 @@ function confirm_clear_prompt(prompt, negative_prompt) {
return [prompt, negative_prompt] return [prompt, negative_prompt]
} }
promptTokecountUpdateFuncs = {}
function recalculatePromptTokens(name){
if(promptTokecountUpdateFuncs[name]){
promptTokecountUpdateFuncs[name]()
}
}
function recalculate_prompts_txt2img(){
recalculatePromptTokens('txt2img_prompt')
recalculatePromptTokens('txt2img_neg_prompt')
return args_to_array(arguments);
}
function recalculate_prompts_img2img(){
recalculatePromptTokens('img2img_prompt')
recalculatePromptTokens('img2img_neg_prompt')
return args_to_array(arguments);
}
opts = {} opts = {}
onUiUpdate(function(){ onUiUpdate(function(){
if(Object.keys(opts).length != 0) return; if(Object.keys(opts).length != 0) return;
@@ -232,14 +280,11 @@ onUiUpdate(function(){
            return
        }

        prompt.parentElement.insertBefore(counter, prompt)
-        counter.classList.add("token-counter")
        prompt.parentElement.style.position = "relative"

-        textarea.addEventListener("input", function(){
-            update_token_counter(id_button);
-        });
+        promptTokecountUpdateFuncs[id] = function(){ update_token_counter(id_button); }
+        textarea.addEventListener("input", promptTokecountUpdateFuncs[id]);
    }

    registerTextarea('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button')
@@ -273,7 +318,7 @@ onOptionsChanged(function(){
let txt2img_textarea, img2img_textarea = undefined;
let wait_time = 800
-let token_timeout;
+let token_timeouts = {};

function update_txt2img_tokens(...args) {
    update_token_counter("txt2img_token_button")
@@ -290,9 +335,9 @@ function update_img2img_tokens(...args) {
}

function update_token_counter(button_id) {
-    if (token_timeout)
-        clearTimeout(token_timeout);
-    token_timeout = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time);
+    if (token_timeouts[button_id])
+        clearTimeout(token_timeouts[button_id]);
+    token_timeouts[button_id] = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time);
}

function restart_reload(){
@ -309,3 +354,10 @@ function updateInput(target){
Object.defineProperty(e, "target", {value: target}) Object.defineProperty(e, "target", {value: target})
target.dispatchEvent(e); target.dispatchEvent(e);
} }
var desiredCheckpointName = null;
function selectCheckpoint(name){
desiredCheckpointName = name;
gradioApp().getElementById('change_checkpoint').click()
}

143
launch.py
View File

@@ -5,16 +5,56 @@ import sys
import importlib.util
import shlex
import platform
-import argparse
import json

-dir_repos = "repositories"
-dir_extensions = "extensions"
+from modules import cmd_args
+from modules.paths_internal import script_path, extensions_dir
+
+commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
+sys.argv += shlex.split(commandline_args)
+args, _ = cmd_args.parser.parse_known_args()

python = sys.executable
git = os.environ.get('GIT', "git")
index_url = os.environ.get('INDEX_URL', "")
stored_commit_hash = None
skip_install = False
+dir_repos = "repositories"
+
+if 'GRADIO_ANALYTICS_ENABLED' not in os.environ:
+    os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
+
+
+def check_python_version():
+    is_windows = platform.system() == "Windows"
+    major = sys.version_info.major
+    minor = sys.version_info.minor
+    micro = sys.version_info.micro
+
+    if is_windows:
+        supported_minors = [10]
+    else:
+        supported_minors = [7, 8, 9, 10, 11]
+
+    if not (major == 3 and minor in supported_minors):
+        import modules.errors
+
+        modules.errors.print_error_explanation(f"""
+INCOMPATIBLE PYTHON VERSION
+
+This program is tested with 3.10.6 Python, but you have {major}.{minor}.{micro}.
+If you encounter an error with "RuntimeError: Couldn't install torch." message,
+or any other error regarding unsuccessful package (library) installation,
+please downgrade (or upgrade) to the latest version of 3.10 Python
+and delete current Python and "venv" folder in WebUI's directory.
+
+You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3109/
+
+{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
+
+Use --skip-python-version-check to suppress this warning.
+""")
def commit_hash():
@@ -31,23 +71,6 @@ def commit_hash():
    return stored_commit_hash


-def extract_arg(args, name):
-    return [x for x in args if x != name], name in args
-
-
-def extract_opt(args, name):
-    opt = None
-    is_present = False
-    if name in args:
-        is_present = True
-        idx = args.index(name)
-        del args[idx]
-        if idx < len(args) and args[idx][0] != "-":
-            opt = args[idx]
-            del args[idx]
-    return args, is_present, opt
-
-
def run(command, desc=None, errdesc=None, custom_env=None, live=False):
    if desc is not None:
        print(desc)
@@ -91,7 +114,7 @@ def is_installed(package):


def repo_dir(name):
-    return os.path.join(dir_repos, name)
+    return os.path.join(script_path, dir_repos, name)


def run_python(code, desc=None, errdesc=None):
@ -130,7 +153,17 @@ def git_clone(url, dir, name, commithash=None):
if commithash is not None: if commithash is not None:
run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
def git_pull_recursive(dir):
for subdir, _, _ in os.walk(dir):
if os.path.exists(os.path.join(subdir, '.git')):
try:
output = subprocess.check_output([git, '-C', subdir, 'pull', '--autostash'])
print(f"Pulled changes for repository in '{subdir}':\n{output.decode('utf-8').strip()}\n")
except subprocess.CalledProcessError as e:
print(f"Couldn't perform 'git pull' on repository in '{subdir}':\n{e.output.decode('utf-8').strip()}\n")
def version_check(commit): def version_check(commit):
try: try:
import requests import requests
@ -173,16 +206,20 @@ def list_extensions(settings_file):
print(e, file=sys.stderr) print(e, file=sys.stderr)
disabled_extensions = set(settings.get('disabled_extensions', [])) disabled_extensions = set(settings.get('disabled_extensions', []))
disable_all_extensions = settings.get('disable_all_extensions', 'none')
return [x for x in os.listdir(dir_extensions) if x not in disabled_extensions] if disable_all_extensions != 'none':
return []
return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]
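A minimal sketch (not part of the diff) of the settings keys list_extensions() reads from the --ui-settings-file; the values shown are assumptions:
settings = {
    "disabled_extensions": ["some-extension"],   # hypothetical name; skipped individually
    "disable_all_extensions": "none",            # any value other than "none" makes list_extensions() return []
}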
def run_extensions_installers(settings_file): def run_extensions_installers(settings_file):
if not os.path.isdir(dir_extensions): if not os.path.isdir(extensions_dir):
return return
for dirname_extension in list_extensions(settings_file): for dirname_extension in list_extensions(settings_file):
run_extension_installer(os.path.join(dir_extensions, dirname_extension)) run_extension_installer(os.path.join(extensions_dir, dirname_extension))
def prepare_environment(): def prepare_environment():
@ -190,8 +227,8 @@ def prepare_environment():
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117") torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.16rc425')
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b")
@ -202,37 +239,24 @@ def prepare_environment():
codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git') codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git') blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "47b6b607fdd31875c9279cd2f4f16b92e4ea958e") stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec") k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "5b3af030dd83e0297272d861c19477735d0317ec")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
sys.argv += shlex.split(commandline_args) if not args.skip_python_version_check:
check_python_version()
parser = argparse.ArgumentParser()
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default='config.json')
args, _ = parser.parse_known_args(sys.argv)
sys.argv, _ = extract_arg(sys.argv, '-f')
sys.argv, skip_torch_cuda_test = extract_arg(sys.argv, '--skip-torch-cuda-test')
sys.argv, reinstall_xformers = extract_arg(sys.argv, '--reinstall-xformers')
sys.argv, reinstall_torch = extract_arg(sys.argv, '--reinstall-torch')
sys.argv, update_check = extract_arg(sys.argv, '--update-check')
sys.argv, run_tests, test_dir = extract_opt(sys.argv, '--tests')
sys.argv, skip_install = extract_arg(sys.argv, '--skip-install')
xformers = '--xformers' in sys.argv
ngrok = '--ngrok' in sys.argv
commit = commit_hash() commit = commit_hash()
print(f"Python {sys.version}") print(f"Python {sys.version}")
print(f"Commit hash: {commit}") print(f"Commit hash: {commit}")
if reinstall_torch or not is_installed("torch") or not is_installed("torchvision"): if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True) run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
if not skip_torch_cuda_test: if not args.skip_torch_cuda_test:
run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'") run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
if not is_installed("gfpgan"): if not is_installed("gfpgan"):
@ -244,22 +268,22 @@ def prepare_environment():
if not is_installed("open_clip"): if not is_installed("open_clip"):
run_pip(f"install {openclip_package}", "open_clip") run_pip(f"install {openclip_package}", "open_clip")
if (not is_installed("xformers") or reinstall_xformers) and xformers: if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers:
if platform.system() == "Windows": if platform.system() == "Windows":
if platform.python_version().startswith("3.10"): if platform.python_version().startswith("3.10"):
run_pip(f"install -U -I --no-deps xformers==0.0.16rc425", "xformers") run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
else: else:
print("Installation of xformers is not supported in this version of Python.") print("Installation of xformers is not supported in this version of Python.")
print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness") print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
if not is_installed("xformers"): if not is_installed("xformers"):
exit(0) exit(0)
elif platform.system() == "Linux": elif platform.system() == "Linux":
run_pip("install xformers==0.0.16rc425", "xformers") run_pip(f"install {xformers_package}", "xformers")
if not is_installed("pyngrok") and ngrok: if not is_installed("pyngrok") and args.ngrok:
run_pip("install pyngrok", "ngrok") run_pip("install pyngrok", "ngrok")
os.makedirs(dir_repos, exist_ok=True) os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)
git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
@ -268,21 +292,26 @@ def prepare_environment():
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash) git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
if not is_installed("lpips"): if not is_installed("lpips"):
run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer") run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer")
run_pip(f"install -r {requirements_file}", "requirements for Web UI") if not os.path.isfile(requirements_file):
requirements_file = os.path.join(script_path, requirements_file)
run_pip(f"install -r \"{requirements_file}\"", "requirements for Web UI")
run_extensions_installers(settings_file=args.ui_settings_file) run_extensions_installers(settings_file=args.ui_settings_file)
if update_check: if args.update_check:
version_check(commit) version_check(commit)
if args.update_all_extensions:
git_pull_recursive(extensions_dir)
if "--exit" in sys.argv: if "--exit" in sys.argv:
print("Exiting because of --exit argument") print("Exiting because of --exit argument")
exit(0) exit(0)
if run_tests: if args.tests and not args.no_tests:
exitcode = tests(test_dir) exitcode = tests(args.tests)
exit(exitcode) exit(exitcode)
@ -291,16 +320,18 @@ def tests(test_dir):
sys.argv.append("--api") sys.argv.append("--api")
if "--ckpt" not in sys.argv: if "--ckpt" not in sys.argv:
sys.argv.append("--ckpt") sys.argv.append("--ckpt")
sys.argv.append("./test/test_files/empty.pt") sys.argv.append(os.path.join(script_path, "test/test_files/empty.pt"))
if "--skip-torch-cuda-test" not in sys.argv: if "--skip-torch-cuda-test" not in sys.argv:
sys.argv.append("--skip-torch-cuda-test") sys.argv.append("--skip-torch-cuda-test")
if "--disable-nan-check" not in sys.argv: if "--disable-nan-check" not in sys.argv:
sys.argv.append("--disable-nan-check") sys.argv.append("--disable-nan-check")
if "--no-tests" not in sys.argv:
sys.argv.append("--no-tests")
print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}") print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}")
os.environ['COMMANDLINE_ARGS'] = "" os.environ['COMMANDLINE_ARGS'] = ""
with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr: with open(os.path.join(script_path, 'test/stdout.txt'), "w", encoding="utf8") as stdout, open(os.path.join(script_path, 'test/stderr.txt'), "w", encoding="utf8") as stderr:
proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr) proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr)
import test.server_poll import test.server_poll


@ -3,11 +3,15 @@ import io
import time import time
import datetime import datetime
import uvicorn import uvicorn
import gradio as gr
from threading import Lock from threading import Lock
from io import BytesIO from io import BytesIO
from gradio.processing_utils import decode_base64_to_file from gradio.processing_utils import decode_base64_to_file
from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request, Response from fastapi import APIRouter, Depends, FastAPI, Request, Response
from fastapi.security import HTTPBasic, HTTPBasicCredentials from fastapi.security import HTTPBasic, HTTPBasicCredentials
from fastapi.exceptions import HTTPException
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
from secrets import compare_digest from secrets import compare_digest
import modules.shared as shared import modules.shared as shared
@ -18,7 +22,8 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_
from modules.textual_inversion.preprocess import preprocess from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image from PIL import PngImagePlugin,Image
from modules.sd_models import checkpoints_list, find_checkpoint_config from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models from modules.realesrgan_model import get_realesrgan_models
from modules import devices from modules import devices
from typing import List from typing import List
@ -89,6 +94,16 @@ def encode_pil_to_base64(image):
return base64.b64encode(bytes_data) return base64.b64encode(bytes_data)
def api_middleware(app: FastAPI): def api_middleware(app: FastAPI):
rich_available = True
try:
import anyio # importing just so it can be placed on silent list
import starlette # importing just so it can be placed on silent list
from rich.console import Console
console = Console()
except:
import traceback
rich_available = False
@app.middleware("http") @app.middleware("http")
async def log_and_time(req: Request, call_next): async def log_and_time(req: Request, call_next):
ts = time.time() ts = time.time()
@ -109,6 +124,36 @@ def api_middleware(app: FastAPI):
)) ))
return res return res
def handle_exception(request: Request, e: Exception):
err = {
"error": type(e).__name__,
"detail": vars(e).get('detail', ''),
"body": vars(e).get('body', ''),
"errors": str(e),
}
print(f"API error: {request.method}: {request.url} {err}")
if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
if rich_available:
console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
else:
traceback.print_exc()
return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))
@app.middleware("http")
async def exception_handling(request: Request, call_next):
try:
return await call_next(request)
except Exception as e:
return handle_exception(request, e)
@app.exception_handler(Exception)
async def fastapi_exception_handler(request: Request, e: Exception):
return handle_exception(request, e)
@app.exception_handler(HTTPException)
async def http_exception_handler(request: Request, e: HTTPException):
return handle_exception(request, e)
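A hedged illustration (not part of the diff) of the JSON body handle_exception() returns to API clients; the detail text is made up:
example_error_body = {
    "error": "HTTPException",            # type(e).__name__
    "detail": "Sampler not found",       # hypothetical detail taken from the exception, if any
    "body": "",
    "errors": "Sampler not found",       # str(e)
}  # returned with the exception's status_code, or 500 if it has none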
class Api: class Api:
def __init__(self, app: FastAPI, queue_lock: Lock): def __init__(self, app: FastAPI, queue_lock: Lock):
@ -149,6 +194,12 @@ class Api:
self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse) self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse)
self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse) self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse)
self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse) self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse)
self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=ScriptsList)
self.default_script_arg_txt2img = []
self.default_script_arg_img2img = []
def add_api_route(self, path: str, endpoint, **kwargs): def add_api_route(self, path: str, endpoint, **kwargs):
if shared.cmd_opts.api_auth: if shared.cmd_opts.api_auth:
@ -162,47 +213,111 @@ class Api:
raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"}) raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"})
def get_script(self, script_name, script_runner): def get_selectable_script(self, script_name, script_runner):
if script_name is None: if script_name is None or script_name == "":
return None, None return None, None
if not script_runner.scripts:
script_runner.initialize_scripts(False)
ui.create_ui()
script_idx = script_name_to_index(script_name, script_runner.selectable_scripts) script_idx = script_name_to_index(script_name, script_runner.selectable_scripts)
script = script_runner.selectable_scripts[script_idx] script = script_runner.selectable_scripts[script_idx]
return script, script_idx return script, script_idx
def get_scripts_list(self):
t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles]
i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles]
return ScriptsList(txt2img = t2ilist, img2img = i2ilist)
def get_script(self, script_name, script_runner):
if script_name is None or script_name == "":
return None, None
script_idx = script_name_to_index(script_name, script_runner.scripts)
return script_runner.scripts[script_idx]
def init_default_script_args(self, script_runner):
#find max idx from the scripts in runner and generate a none array to init script_args
last_arg_index = 1
for script in script_runner.scripts:
if last_arg_index < script.args_to:
last_arg_index = script.args_to
# None everywhere except position 0 to initialize script args
script_args = [None]*last_arg_index
script_args[0] = 0
# get default values
with gr.Blocks(): # will throw errors calling ui function without this
for script in script_runner.scripts:
if script.ui(script.is_img2img):
ui_default_values = []
for elem in script.ui(script.is_img2img):
ui_default_values.append(elem.value)
script_args[script.args_from:script.args_to] = ui_default_values
return script_args
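A small worked sketch (assumed values, not part of the diff) of the flat argument layout init_default_script_args() produces:
# suppose the runner holds two scripts with
#   script_a.args_from, script_a.args_to = 1, 3   # two UI components
#   script_b.args_from, script_b.args_to = 3, 6   # three UI components
# then last_arg_index == 6 and the defaults come out as
default_script_args = [0, "a1_default", "a2_default", "b1_default", "b2_default", "b3_default"]
# index 0 stays 0 until init_script_args() overwrites it with selectable_idx + 1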
def init_script_args(self, request, default_script_args, selectable_scripts, selectable_idx, script_runner):
script_args = default_script_args.copy()
# position 0 in script_arg is the idx+1 of the selectable script that is going to be run when using scripts.scripts_*2img.run()
if selectable_scripts:
script_args[selectable_scripts.args_from:selectable_scripts.args_to] = request.script_args
script_args[0] = selectable_idx + 1
# Now check for always on scripts
if request.alwayson_scripts and (len(request.alwayson_scripts) > 0):
for alwayson_script_name in request.alwayson_scripts.keys():
alwayson_script = self.get_script(alwayson_script_name, script_runner)
if alwayson_script is None:
raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found")
# Selectable script in always on script param check
if not alwayson_script.alwayson:
raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params")
# always on script with no arg should always run so you don't really need to add them to the requests
if "args" in request.alwayson_scripts[alwayson_script_name]:
script_args[alwayson_script.args_from:alwayson_script.args_to] = request.alwayson_scripts[alwayson_script_name]["args"]
return script_args
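A hedged example (not part of the diff) of the request fields init_script_args() consumes; the script names and argument values are made up:
payload = {
    "prompt": "a photo of a cat",
    "script_name": "some selectable script",            # hypothetical; run via scripts_*2img.run()
    "script_args": ["arg1", 2],                          # copied into that script's args_from:args_to slice
    "alwayson_scripts": {
        "some alwayson script": {"args": [True, 0.5]},   # hypothetical; copied into its own slice
    },
}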
def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
script, script_idx = self.get_script(txt2imgreq.script_name, scripts.scripts_txt2img) script_runner = scripts.scripts_txt2img
if not script_runner.scripts:
script_runner.initialize_scripts(False)
ui.create_ui()
if not self.default_script_arg_txt2img:
self.default_script_arg_txt2img = self.init_default_script_args(script_runner)
selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner)
populate = txt2imgreq.copy(update={ # Override __init__ params populate = txt2imgreq.copy(update={ # Override __init__ params
"sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index), "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
"do_not_save_samples": True, "do_not_save_samples": not txt2imgreq.save_images,
"do_not_save_grid": True "do_not_save_grid": not txt2imgreq.save_images,
} })
)
if populate.sampler_name: if populate.sampler_name:
populate.sampler_index = None # prevent a warning later on populate.sampler_index = None # prevent a warning later on
args = vars(populate) args = vars(populate)
args.pop('script_name', None) args.pop('script_name', None)
args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
args.pop('alwayson_scripts', None)
script_args = self.init_script_args(txt2imgreq, self.default_script_arg_txt2img, selectable_scripts, selectable_script_idx, script_runner)
send_images = args.pop('send_images', True)
args.pop('save_images', None)
with self.queue_lock: with self.queue_lock:
p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args) p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)
p.scripts = script_runner
p.outpath_grids = opts.outdir_txt2img_grids
p.outpath_samples = opts.outdir_txt2img_samples
shared.state.begin() shared.state.begin()
if script is not None: if selectable_scripts != None:
p.outpath_grids = opts.outdir_txt2img_grids p.script_args = script_args
p.outpath_samples = opts.outdir_txt2img_samples processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
p.script_args = [script_idx + 1] + [None] * (script.args_from - 1) + p.script_args
processed = scripts.scripts_txt2img.run(p, *p.script_args)
else: else:
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p) processed = process_images(p)
shared.state.end() shared.state.end()
b64images = list(map(encode_pil_to_base64, processed.images)) b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js()) return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
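A minimal client-side sketch (base URL and prompt assumed, not part of the diff) exercising the new send_images/save_images flags:
import requests

resp = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json={
    "prompt": "a photo of a cat",
    "send_images": False,   # the "images" list in the response will be empty
    "save_images": True,    # outputs are written to the usual txt2img output directories instead
})
print(resp.json()["info"])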
@ -211,41 +326,55 @@ class Api:
if init_images is None: if init_images is None:
raise HTTPException(status_code=404, detail="Init image not found") raise HTTPException(status_code=404, detail="Init image not found")
script, script_idx = self.get_script(img2imgreq.script_name, scripts.scripts_img2img)
mask = img2imgreq.mask mask = img2imgreq.mask
if mask: if mask:
mask = decode_base64_to_image(mask) mask = decode_base64_to_image(mask)
populate = img2imgreq.copy(update={ # Override __init__ params script_runner = scripts.scripts_img2img
if not script_runner.scripts:
script_runner.initialize_scripts(True)
ui.create_ui()
if not self.default_script_arg_img2img:
self.default_script_arg_img2img = self.init_default_script_args(script_runner)
selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner)
populate = img2imgreq.copy(update={ # Override __init__ params
"sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index), "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
"do_not_save_samples": True, "do_not_save_samples": not img2imgreq.save_images,
"do_not_save_grid": True, "do_not_save_grid": not img2imgreq.save_images,
"mask": mask "mask": mask,
} })
)
if populate.sampler_name: if populate.sampler_name:
populate.sampler_index = None # prevent a warning later on populate.sampler_index = None # prevent a warning later on
args = vars(populate) args = vars(populate)
args.pop('include_init_images', None) # this is meant to be done by "exclude": True in the model, but it doesn't work, for a reason I cannot determine. args.pop('include_init_images', None) # this is meant to be done by "exclude": True in the model, but it doesn't work, for a reason I cannot determine.
args.pop('script_name', None) args.pop('script_name', None)
args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
args.pop('alwayson_scripts', None)
script_args = self.init_script_args(img2imgreq, self.default_script_arg_img2img, selectable_scripts, selectable_script_idx, script_runner)
send_images = args.pop('send_images', True)
args.pop('save_images', None)
with self.queue_lock: with self.queue_lock:
p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args) p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)
p.init_images = [decode_base64_to_image(x) for x in init_images] p.init_images = [decode_base64_to_image(x) for x in init_images]
p.scripts = script_runner
p.outpath_grids = opts.outdir_img2img_grids
p.outpath_samples = opts.outdir_img2img_samples
shared.state.begin() shared.state.begin()
if script is not None: if selectable_scripts != None:
p.outpath_grids = opts.outdir_img2img_grids p.script_args = script_args
p.outpath_samples = opts.outdir_img2img_samples processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
p.script_args = [script_idx + 1] + [None] * (script.args_from - 1) + p.script_args
processed = scripts.scripts_img2img.run(p, *p.script_args)
else: else:
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p) processed = process_images(p)
shared.state.end() shared.state.end()
b64images = list(map(encode_pil_to_base64, processed.images)) b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
if not img2imgreq.include_init_images: if not img2imgreq.include_init_images:
img2imgreq.init_images = None img2imgreq.init_images = None
@ -347,6 +476,16 @@ class Api:
return {} return {}
def unloadapi(self):
unload_model_weights()
return {}
def reloadapi(self):
reload_model_weights()
return {}
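A hedged usage sketch (not part of the diff) for the new checkpoint endpoints; the base URL is an assumption:
import requests

base_url = "http://127.0.0.1:7860"
requests.post(f"{base_url}/sdapi/v1/unload-checkpoint")   # frees the model weights via unload_model_weights()
requests.post(f"{base_url}/sdapi/v1/reload-checkpoint")   # loads them back via reload_model_weights()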
def skip(self): def skip(self):
shared.state.skip() shared.state.skip()
@ -387,7 +526,7 @@ class Api:
] ]
def get_sd_models(self): def get_sd_models(self):
return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config(x)} for x in checkpoints_list.values()] return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
def get_hypernetworks(self): def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks] return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
@ -497,7 +636,7 @@ class Api:
if not apply_optimizations: if not apply_optimizations:
sd_hijack.undo_optimizations() sd_hijack.undo_optimizations()
try: try:
hypernetwork, filename = train_hypernetwork(*args) hypernetwork, filename = train_hypernetwork(**args)
except Exception as e: except Exception as e:
error = e error = e
finally: finally:


@ -14,8 +14,8 @@ API_NOT_ALLOWED = [
"outpath_samples", "outpath_samples",
"outpath_grids", "outpath_grids",
"sampler_index", "sampler_index",
"do_not_save_samples", # "do_not_save_samples",
"do_not_save_grid", # "do_not_save_grid",
"extra_generation_params", "extra_generation_params",
"overlay_images", "overlay_images",
"do_not_reload_embeddings", "do_not_reload_embeddings",
@ -100,13 +100,31 @@ class PydanticModelGenerator:
StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
"StableDiffusionProcessingTxt2Img", "StableDiffusionProcessingTxt2Img",
StableDiffusionProcessingTxt2Img, StableDiffusionProcessingTxt2Img,
[{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}] [
{"key": "sampler_index", "type": str, "default": "Euler"},
{"key": "script_name", "type": str, "default": None},
{"key": "script_args", "type": list, "default": []},
{"key": "send_images", "type": bool, "default": True},
{"key": "save_images", "type": bool, "default": False},
{"key": "alwayson_scripts", "type": dict, "default": {}},
]
).generate_model() ).generate_model()
StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
"StableDiffusionProcessingImg2Img", "StableDiffusionProcessingImg2Img",
StableDiffusionProcessingImg2Img, StableDiffusionProcessingImg2Img,
[{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}] [
{"key": "sampler_index", "type": str, "default": "Euler"},
{"key": "init_images", "type": list, "default": None},
{"key": "denoising_strength", "type": float, "default": 0.75},
{"key": "mask", "type": str, "default": None},
{"key": "include_init_images", "type": bool, "default": False, "exclude" : True},
{"key": "script_name", "type": str, "default": None},
{"key": "script_args", "type": list, "default": []},
{"key": "send_images", "type": bool, "default": True},
{"key": "save_images", "type": bool, "default": False},
{"key": "alwayson_scripts", "type": dict, "default": {}},
]
).generate_model() ).generate_model()
class TextToImageResponse(BaseModel): class TextToImageResponse(BaseModel):
@ -228,7 +246,7 @@ class SDModelItem(BaseModel):
hash: Optional[str] = Field(title="Short hash") hash: Optional[str] = Field(title="Short hash")
sha256: Optional[str] = Field(title="sha256 hash") sha256: Optional[str] = Field(title="sha256 hash")
filename: str = Field(title="Filename") filename: str = Field(title="Filename")
config: str = Field(title="Config file") config: Optional[str] = Field(title="Config file")
class HypernetworkItem(BaseModel): class HypernetworkItem(BaseModel):
name: str = Field(title="Name") name: str = Field(title="Name")
@ -267,3 +285,7 @@ class EmbeddingsResponse(BaseModel):
class MemoryResponse(BaseModel): class MemoryResponse(BaseModel):
ram: dict = Field(title="RAM", description="System memory stats") ram: dict = Field(title="RAM", description="System memory stats")
cuda: dict = Field(title="CUDA", description="nVidia CUDA memory stats") cuda: dict = Field(title="CUDA", description="nVidia CUDA memory stats")
class ScriptsList(BaseModel):
txt2img: list = Field(default=None,title="Txt2img", description="Titles of scripts (txt2img)")
img2img: list = Field(default=None,title="Img2img", description="Titles of scripts (img2img)")
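An illustrative response shape (not part of the diff) for GET /sdapi/v1/scripts; the script titles are made up:
example_scripts_list = {
    "txt2img": ["prompt matrix", "x/y/z plot"],    # hypothetical lowercase titles from scripts_txt2img
    "img2img": ["outpainting mk2", "sd upscale"],  # hypothetical lowercase titles from scripts_img2img
}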

modules/cmd_args.py

@ -0,0 +1,103 @@
import argparse
import os
from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file
parser = argparse.ArgumentParser()
parser.add_argument("-f", action='store_true', help=argparse.SUPPRESS) # allows running as root; implemented outside of webui
parser.add_argument("--update-all-extensions", action='store_true', help="launch.py argument: download updates for all extensions when starting the program")
parser.add_argument("--skip-python-version-check", action='store_true', help="launch.py argument: do not check python version")
parser.add_argument("--skip-torch-cuda-test", action='store_true', help="launch.py argument: do not check if CUDA is able to work properly")
parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py argument: install the appropriate version of xformers even if you have some version already installed")
parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
parser.add_argument("--update-check", action='store_true', help="launch.py argument: chck for updates at startup")
parser.add_argument("--tests", type=str, default=None, help="launch.py argument: run tests in the specified directory")
parser.add_argument("--no-tests", action='store_true', help="launch.py argument: do not run tests even if --tests option is specified")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization")
parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--opt-sdp-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization; requires PyTorch 2.*")
parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="enable scaled dot product cross-attention layer optimization without memory efficient attention, makes image generation deterministic; requires PyTorch 2.*")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(data_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)")
parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--api-log", action='store_true', help="use api-log=True to enable logging of all API requests")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the API instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origin(s) in the form of a comma-separated list (no spaces)", default=None)
parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS origin(s) in the form of a single regular expression", default=None)
parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True)
parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions")
parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers")
parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False)
parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False)


@ -8,7 +8,7 @@ import torch
import modules.face_restoration import modules.face_restoration
import modules.shared import modules.shared
from modules import shared, devices, modelloader from modules import shared, devices, modelloader
from modules.paths import script_path, models_path from modules.paths import models_path
# codeformer people made a choice to include modified basicsr library to their project which makes # codeformer people made a choice to include modified basicsr library to their project which makes
# it utterly impossible to use it alongside with other libraries that also use basicsr, like GFPGAN. # it utterly impossible to use it alongside with other libraries that also use basicsr, like GFPGAN.
@ -55,7 +55,7 @@ def setup_model(dirname):
if self.net is not None and self.face_helper is not None: if self.net is not None and self.face_helper is not None:
self.net.to(devices.device_codeformer) self.net.to(devices.device_codeformer)
return self.net, self.face_helper return self.net, self.face_helper
model_paths = modelloader.load_models(model_path, model_url, self.cmd_dir, download_name='codeformer-v0.1.0.pth') model_paths = modelloader.load_models(model_path, model_url, self.cmd_dir, download_name='codeformer-v0.1.0.pth', ext_filter=['.pth'])
if len(model_paths) != 0: if len(model_paths) != 0:
ckpt_path = model_paths[0] ckpt_path = model_paths[0]
else: else:


@ -2,6 +2,8 @@ import torch
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
from modules import devices
# see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more # see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more
@ -196,7 +198,7 @@ class DeepDanbooruModel(nn.Module):
t_358, = inputs t_358, = inputs
t_359 = t_358.permute(*[0, 3, 1, 2]) t_359 = t_358.permute(*[0, 3, 1, 2])
t_359_padded = F.pad(t_359, [2, 3, 2, 3], value=0) t_359_padded = F.pad(t_359, [2, 3, 2, 3], value=0)
t_360 = self.n_Conv_0(t_359_padded) t_360 = self.n_Conv_0(t_359_padded.to(self.n_Conv_0.bias.dtype) if devices.unet_needs_upcast else t_359_padded)
t_361 = F.relu(t_360) t_361 = F.relu(t_360)
t_361 = F.pad(t_361, [0, 1, 0, 1], value=float('-inf')) t_361 = F.pad(t_361, [0, 1, 0, 1], value=float('-inf'))
t_362 = self.n_MaxPool_0(t_361) t_362 = self.n_MaxPool_0(t_361)


@ -1,21 +1,17 @@
import sys, os, shlex import sys
import contextlib import contextlib
import torch import torch
from modules import errors from modules import errors
from packaging import version
if sys.platform == "darwin":
from modules import mac_specific
# has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
# check `getattr` and try it for compatibility
def has_mps() -> bool: def has_mps() -> bool:
if not getattr(torch, 'has_mps', False): if sys.platform != "darwin":
return False return False
try: else:
torch.zeros(1).to(torch.device("mps")) return mac_specific.has_mps
return True
except Exception:
return False
def extract_device_id(args, name): def extract_device_id(args, name):
for x in range(len(args)): for x in range(len(args)):
@ -34,14 +30,18 @@ def get_cuda_device_string():
return "cuda" return "cuda"
def get_optimal_device(): def get_optimal_device_name():
if torch.cuda.is_available(): if torch.cuda.is_available():
return torch.device(get_cuda_device_string()) return get_cuda_device_string()
if has_mps(): if has_mps():
return torch.device("mps") return "mps"
return cpu return "cpu"
def get_optimal_device():
return torch.device(get_optimal_device_name())
def get_device_for(task): def get_device_for(task):
@ -79,6 +79,16 @@ cpu = torch.device("cpu")
device = device_interrogate = device_gfpgan = device_esrgan = device_codeformer = None device = device_interrogate = device_gfpgan = device_esrgan = device_codeformer = None
dtype = torch.float16 dtype = torch.float16
dtype_vae = torch.float16 dtype_vae = torch.float16
dtype_unet = torch.float16
unet_needs_upcast = False
def cond_cast_unet(input):
return input.to(dtype_unet) if unet_needs_upcast else input
def cond_cast_float(input):
return input.float() if unet_needs_upcast else input
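A minimal sketch (assumptions: running inside the webui codebase, upcast forced on by hand purely for illustration) of how the new cond_cast helpers behave:
import torch
from modules import devices

devices.unet_needs_upcast = True            # normally set elsewhere by the upcast-sampling option
x = torch.randn(1, 4, 64, 64, dtype=torch.float32)
x_unet = devices.cond_cast_unet(x)          # cast to devices.dtype_unet before feeding the UNet
x_back = devices.cond_cast_float(x_unet)    # cast back to float32 afterwards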
def randn(seed, shape): def randn(seed, shape):
@ -106,6 +116,10 @@ def autocast(disable=False):
return torch.autocast("cuda") return torch.autocast("cuda")
def without_autocast(disable=False):
return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext()
class NansException(Exception): class NansException(Exception):
pass pass
@ -123,7 +137,7 @@ def test_for_nans(x, where):
message = "A tensor with all NaNs was produced in Unet." message = "A tensor with all NaNs was produced in Unet."
if not shared.cmd_opts.no_half: if not shared.cmd_opts.no_half:
message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try using --no-half commandline argument to fix this." message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this."
elif where == "vae": elif where == "vae":
message = "A tensor with all NaNs was produced in VAE." message = "A tensor with all NaNs was produced in VAE."
@ -133,60 +147,6 @@ def test_for_nans(x, where):
else: else:
message = "A tensor with all NaNs was produced." message = "A tensor with all NaNs was produced."
message += " Use --disable-nan-check commandline argument to disable this check."
raise NansException(message) raise NansException(message)
# MPS workaround for https://github.com/pytorch/pytorch/issues/79383
orig_tensor_to = torch.Tensor.to
def tensor_to_fix(self, *args, **kwargs):
if self.device.type != 'mps' and \
((len(args) > 0 and isinstance(args[0], torch.device) and args[0].type == 'mps') or \
(isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps')):
self = self.contiguous()
return orig_tensor_to(self, *args, **kwargs)
# MPS workaround for https://github.com/pytorch/pytorch/issues/80800
orig_layer_norm = torch.nn.functional.layer_norm
def layer_norm_fix(*args, **kwargs):
if len(args) > 0 and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps':
args = list(args)
args[0] = args[0].contiguous()
return orig_layer_norm(*args, **kwargs)
# MPS workaround for https://github.com/pytorch/pytorch/issues/90532
orig_tensor_numpy = torch.Tensor.numpy
def numpy_fix(self, *args, **kwargs):
if self.requires_grad:
self = self.detach()
return orig_tensor_numpy(self, *args, **kwargs)
# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
orig_cumsum = torch.cumsum
orig_Tensor_cumsum = torch.Tensor.cumsum
def cumsum_fix(input, cumsum_func, *args, **kwargs):
if input.device.type == 'mps':
output_dtype = kwargs.get('dtype', input.dtype)
if output_dtype == torch.int64:
return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
elif cumsum_needs_bool_fix and output_dtype == torch.bool or cumsum_needs_int_fix and (output_dtype == torch.int8 or output_dtype == torch.int16):
return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64)
return cumsum_func(input, *args, **kwargs)
if has_mps():
if version.parse(torch.__version__) < version.parse("1.13"):
# PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
torch.Tensor.to = tensor_to_fix
torch.nn.functional.layer_norm = layer_norm_fix
torch.Tensor.numpy = numpy_fix
elif version.parse(torch.__version__) > version.parse("1.13.1"):
cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
cumsum_needs_bool_fix = not torch.BoolTensor([True,True]).to(device=torch.device("mps"), dtype=torch.int64).equal(torch.BoolTensor([True,False]).to(torch.device("mps")).cumsum(0))
torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) )
torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) )
orig_narrow = torch.narrow
torch.narrow = lambda *args, **kwargs: ( orig_narrow(*args, **kwargs).clone() )


@ -1,5 +1,6 @@
# this file is adapted from https://github.com/victorca25/iNNfer # this file is adapted from https://github.com/victorca25/iNNfer
from collections import OrderedDict
import math import math
import functools import functools
import torch import torch


@ -2,17 +2,25 @@ import os
import sys import sys
import traceback import traceback
import time
import git import git
from modules import paths, shared from modules import shared
from modules.paths_internal import extensions_dir, extensions_builtin_dir
extensions = [] extensions = []
extensions_dir = os.path.join(paths.script_path, "extensions")
extensions_builtin_dir = os.path.join(paths.script_path, "extensions-builtin") if not os.path.exists(extensions_dir):
os.makedirs(extensions_dir)
def active(): def active():
return [x for x in extensions if x.enabled] if shared.opts.disable_all_extensions == "all":
return []
elif shared.opts.disable_all_extensions == "extra":
return [x for x in extensions if x.enabled and x.is_builtin]
else:
return [x for x in extensions if x.enabled]
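A short summary sketch (not part of the diff) of what active() returns for each value of the new option:
# effect of the new option on active():
#   shared.opts.disable_all_extensions == "all"    -> []                              (nothing loads)
#   shared.opts.disable_all_extensions == "extra"  -> enabled built-in extensions only
#   any other value ("none")                       -> every enabled extension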
class Extension: class Extension:
@ -23,21 +31,34 @@ class Extension:
self.status = ''
self.can_update = False
self.is_builtin = is_builtin
+ self.version = ''
+ self.remote = None
+ self.have_info_from_repo = False
+ def read_info_from_repo(self):
+     if self.have_info_from_repo:
+         return
+     self.have_info_from_repo = True
repo = None
try:
-     if os.path.exists(os.path.join(path, ".git")):
-         repo = git.Repo(path)
+     if os.path.exists(os.path.join(self.path, ".git")):
+         repo = git.Repo(self.path)
except Exception:
-     print(f"Error reading github repository info from {path}:", file=sys.stderr)
+     print(f"Error reading github repository info from {self.path}:", file=sys.stderr)
    print(traceback.format_exc(), file=sys.stderr)
if repo is None or repo.bare:
    self.remote = None
else:
    try:
-         self.remote = next(repo.remote().urls, None)
        self.status = 'unknown'
+         self.remote = next(repo.remote().urls, None)
+         head = repo.head.commit
+         ts = time.asctime(time.gmtime(repo.head.commit.committed_date))
+         self.version = f'{head.hexsha[:8]} ({ts})'
    except Exception:
        self.remote = None
@ -58,7 +79,7 @@ class Extension:
def check_updates(self):
    repo = git.Repo(self.path)
-     for fetch in repo.remote().fetch("--dry-run"):
+     for fetch in repo.remote().fetch(dry_run=True):
        if fetch.flags != fetch.HEAD_UPTODATE:
            self.can_update = True
            self.status = "behind"
@ -71,8 +92,8 @@ class Extension:
repo = git.Repo(self.path)
# Fix: `error: Your local changes to the following files would be overwritten by merge`,
# because WSL2 Docker set 755 file permissions instead of 644, this results to the error.
- repo.git.fetch('--all')
- repo.git.reset('--hard', 'origin')
+ repo.git.fetch(all=True)
+ repo.git.reset('origin', hard=True)
def list_extensions():
@ -81,7 +102,12 @@ def list_extensions():
if not os.path.isdir(extensions_dir):
    return
- paths = []
+ if shared.opts.disable_all_extensions == "all":
+     print("*** \"Disable all extensions\" option was set, will not load any extensions ***")
+ elif shared.opts.disable_all_extensions == "extra":
+     print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***")
+ extension_paths = []
for dirname in [extensions_dir, extensions_builtin_dir]:
    if not os.path.isdir(dirname):
        return
@ -91,9 +117,8 @@ def list_extensions():
if not os.path.isdir(path):
    continue
- paths.append((extension_dirname, path, dirname == extensions_builtin_dir))
+ extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir))
- for dirname, path, is_builtin in paths:
+ for dirname, path, is_builtin in extension_paths:
    extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin)
    extensions.append(extension)
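Editor's note: a minimal sketch of how the new disable_all_extensions setting interacts with per-extension flags, assuming a simplified stand-in for the Extension objects (the Ext container and sample names below are made up for illustration):

class Ext:
    def __init__(self, name, enabled, is_builtin):
        self.name, self.enabled, self.is_builtin = name, enabled, is_builtin

def active(extensions, disable_all_extensions):
    # Mirrors the logic added to modules/extensions.py: "all" hides everything,
    # "extra" keeps only built-in extensions, anything else keeps enabled ones.
    if disable_all_extensions == "all":
        return []
    elif disable_all_extensions == "extra":
        return [x for x in extensions if x.enabled and x.is_builtin]
    return [x for x in extensions if x.enabled]

exts = [Ext("Lora", True, True), Ext("some-extra", True, False), Ext("disabled-one", False, False)]
print([x.name for x in active(exts, "extra")])  # ['Lora']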

View File

@ -1,4 +1,4 @@
- from modules import extra_networks
+ from modules import extra_networks, shared, extra_networks
from modules.hypernetworks import hypernetwork
@ -7,6 +7,12 @@ class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
super().__init__('hypernet')
def activate(self, p, params_list):
+     additional = shared.opts.sd_hypernetwork
+     if additional != "" and additional in shared.hypernetworks and len([x for x in params_list if x.items[0] == additional]) == 0:
+         p.all_prompts = [x + f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
+         params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
    names = []
    multipliers = []
    for params in params_list:
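Editor's note: a rough sketch of what the added activate logic produces when the option-selected hypernetwork is not already requested by the prompt; the values below are placeholders standing in for shared.opts.sd_hypernetwork and shared.opts.extra_networks_default_multiplier:

additional = "my_hypernet"   # placeholder for the sd_hypernetwork setting
multiplier = 1.0             # placeholder for extra_networks_default_multiplier
prompts = ["a photo of a cat"]

already_requested = False    # i.e. no params entry whose first item equals `additional`
if additional and not already_requested:
    # the same suffix is appended to every prompt so the network gets activated
    prompts = [p + f"<hypernet:{additional}:{multiplier}>" for p in prompts]

print(prompts[0])  # a photo of a cat<hypernet:my_hypernet:1.0>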

View File

@ -6,7 +6,7 @@ import shutil
import torch
import tqdm
- from modules import shared, images, sd_models, sd_vae
+ from modules import shared, images, sd_models, sd_vae, sd_models_config
from modules.ui_common import plaintext_to_html
import gradio as gr
import safetensors.torch
@ -37,7 +37,7 @@ def run_pnginfo(image):
def create_config(ckpt_result, config_source, a, b, c):
    def config(x):
-         res = sd_models.find_checkpoint_config(x) if x else None
+         res = sd_models_config.find_checkpoint_config_near_filename(x) if x else None
        return res if res != shared.sd_default_config else None
    if config_source == 0:
@ -132,6 +132,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
tertiary_model_info = sd_models.checkpoints_list[tertiary_model_name] if theta_func1 else None
result_is_inpainting_model = False
+ result_is_instruct_pix2pix_model = False
if theta_func2:
    shared.state.textinfo = f"Loading B"
@ -185,14 +186,19 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
if a.shape != b.shape and a.shape[0:1] + a.shape[2:] == b.shape[0:1] + b.shape[2:]:
    if a.shape[1] == 4 and b.shape[1] == 9:
        raise RuntimeError("When merging inpainting model with a normal one, A must be the inpainting model.")
+     if a.shape[1] == 4 and b.shape[1] == 8:
+         raise RuntimeError("When merging instruct-pix2pix model with a normal one, A must be the instruct-pix2pix model.")
-     assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
-     theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
-     result_is_inpainting_model = True
+     if a.shape[1] == 8 and b.shape[1] == 4:#If we have an Instruct-Pix2Pix model...
+         theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)#Merge only the vectors the models have in common. Otherwise we get an error due to dimension mismatch.
+         result_is_instruct_pix2pix_model = True
+     else:
+         assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
+         theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
+         result_is_inpainting_model = True
else:
    theta_0[key] = theta_func2(a, b, multiplier)
theta_0[key] = to_half(theta_0[key], save_as_half)
shared.state.sampling_step += 1
@ -226,6 +232,7 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
filename = filename_generator() if custom_name == '' else custom_name
filename += ".inpainting" if result_is_inpainting_model else ""
+ filename += ".instruct-pix2pix" if result_is_instruct_pix2pix_model else ""
filename += "." + checkpoint_format
output_modelname = os.path.join(ckpt_dir, filename)
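Editor's note: the inpainting / instruct-pix2pix branch above merges only the first four input channels of the UNet's first convolution, because model A carries extra conditioning channels (9 for inpainting, 8 for pix2pix) that model B lacks. A small sketch of that idea with a weighted-sum merge; the shapes and the weighted_sum helper are illustrative, not the merger's actual theta_func2:

import torch

def weighted_sum(a, b, multiplier):
    # stand-in for the merger's interpolation function
    return a * (1 - multiplier) + b * multiplier

# a: first conv weight of an inpainting model (9 input channels),
# b: the same layer of a normal model (4 input channels) -- toy shapes.
a = torch.randn(320, 9, 3, 3)
b = torch.randn(320, 4, 3, 3)
multiplier = 0.5

merged = a.clone()
# Merge only the channels both models share; the extra mask/conditioning
# channels of model A are kept unchanged.
merged[:, 0:4, :, :] = weighted_sum(a[:, 0:4, :, :], b, multiplier)
print(merged.shape)  # torch.Size([320, 9, 3, 3])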

View File

@ -1,4 +1,5 @@
import base64
+ import html
import io
import math
import os
@ -6,24 +7,34 @@ import re
from pathlib import Path
import gradio as gr
- from modules.shared import script_path
+ from modules.paths import data_path
from modules import shared, ui_tempdir, script_callbacks
import tempfile
from PIL import Image
- re_param_code = r'\s*([\w ]+):\s*("(?:\\|\"|[^\"])+"|[^,]*)(?:,|$)'
+ re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'
re_param = re.compile(re_param_code)
- re_params = re.compile(r"^(?:" + re_param_code + "){3,}$")
re_imagesize = re.compile(r"^(\d+)x(\d+)$")
re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$")
type_of_gr_update = type(gr.update())
paste_fields = {}
- bind_list = []
+ registered_param_bindings = []
class ParamBinding:
def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=[]):
self.paste_button = paste_button
self.tabname = tabname
self.source_text_component = source_text_component
self.source_image_component = source_image_component
self.source_tabname = source_tabname
self.override_settings_component = override_settings_component
self.paste_field_names = paste_field_names
def reset():
    paste_fields.clear()
-     bind_list.clear()
def quote(text):
@ -64,8 +75,8 @@ def image_from_url_text(filedata):
return image
- def add_paste_fields(tabname, init_img, fields):
+ def add_paste_fields(tabname, init_img, fields, override_settings_component=None):
-     paste_fields[tabname] = {"init_img": init_img, "fields": fields}
+     paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component}
# backwards compatibility for existing extensions
import modules.ui
@ -75,26 +86,6 @@ def add_paste_fields(tabname, init_img, fields):
modules.ui.img2img_paste_fields = fields
def integrate_settings_paste_fields(component_dict):
from modules import ui
settings_map = {
'CLIP_stop_at_last_layers': 'Clip skip',
'inpainting_mask_weight': 'Conditional mask weight',
'sd_model_checkpoint': 'Model hash',
'eta_noise_seed_delta': 'ENSD',
'initial_noise_multiplier': 'Noise multiplier',
}
settings_paste_fields = [
(component_dict[k], lambda d, k=k, v=v: ui.apply_setting(k, d.get(v, None)))
for k, v in settings_map.items()
]
for tabname, info in paste_fields.items():
if info["fields"] is not None:
info["fields"] += settings_paste_fields
def create_buttons(tabs_list):
    buttons = {}
    for tab in tabs_list:
@ -102,9 +93,61 @@ def create_buttons(tabs_list):
return buttons
- #if send_generate_info is a tab name, mean generate_info comes from the params fields of the tab
def bind_buttons(buttons, send_image, send_generate_info):
-     bind_list.append([buttons, send_image, send_generate_info])
+     """old function for backwards compatibility; do not use this, use register_paste_params_button"""
for tabname, button in buttons.items():
source_text_component = send_generate_info if isinstance(send_generate_info, gr.components.Component) else None
source_tabname = send_generate_info if isinstance(send_generate_info, str) else None
register_paste_params_button(ParamBinding(paste_button=button, tabname=tabname, source_text_component=source_text_component, source_image_component=send_image, source_tabname=source_tabname))
def register_paste_params_button(binding: ParamBinding):
registered_param_bindings.append(binding)
def connect_paste_params_buttons():
binding: ParamBinding
for binding in registered_param_bindings:
destination_image_component = paste_fields[binding.tabname]["init_img"]
fields = paste_fields[binding.tabname]["fields"]
override_settings_component = binding.override_settings_component or paste_fields[binding.tabname]["override_settings_component"]
destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None)
destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)
if binding.source_image_component and destination_image_component:
if isinstance(binding.source_image_component, gr.Gallery):
func = send_image_and_dimensions if destination_width_component else image_from_url_text
jsfunc = "extract_image_from_gallery"
else:
func = send_image_and_dimensions if destination_width_component else lambda x: x
jsfunc = None
binding.paste_button.click(
fn=func,
_js=jsfunc,
inputs=[binding.source_image_component],
outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
)
if binding.source_text_component is not None and fields is not None:
connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname)
if binding.source_tabname is not None and fields is not None:
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) + binding.paste_field_names
binding.paste_button.click(
fn=lambda *x: x,
inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names],
outputs=[field for field, name in fields if name in paste_field_names],
)
binding.paste_button.click(
fn=None,
_js=f"switch_to_{binding.tabname}",
inputs=None,
outputs=None,
)
def send_image_and_dimensions(x):
@ -123,49 +166,6 @@ def send_image_and_dimensions(x):
return img, w, h
def run_bind():
for buttons, source_image_component, send_generate_info in bind_list:
for tab in buttons:
button = buttons[tab]
destination_image_component = paste_fields[tab]["init_img"]
fields = paste_fields[tab]["fields"]
destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None)
destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)
if source_image_component and destination_image_component:
if isinstance(source_image_component, gr.Gallery):
func = send_image_and_dimensions if destination_width_component else image_from_url_text
jsfunc = "extract_image_from_gallery"
else:
func = send_image_and_dimensions if destination_width_component else lambda x: x
jsfunc = None
button.click(
fn=func,
_js=jsfunc,
inputs=[source_image_component],
outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
)
if send_generate_info and fields is not None:
if send_generate_info in paste_fields:
paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else [])
button.click(
fn=lambda *x: x,
inputs=[field for field, name in paste_fields[send_generate_info]["fields"] if name in paste_field_names],
outputs=[field for field, name in fields if name in paste_field_names],
)
else:
connect_paste(button, fields, send_generate_info)
button.click(
fn=None,
_js=f"switch_to_{tab}",
inputs=None,
outputs=None,
)
def find_hypernetwork_key(hypernet_name, hypernet_hash=None):
    """Determines the config parameter name to use for the hypernet based on the parameters in the infotext.
@ -243,7 +243,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
done_with_prompt = False
*lines, lastline = x.strip().split("\n")
- if not re_params.match(lastline):
+ if len(re_param.findall(lastline)) < 3:
    lines.append(lastline)
    lastline = ''
@ -262,6 +262,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
res["Negative prompt"] = negative_prompt res["Negative prompt"] = negative_prompt
for k, v in re_param.findall(lastline): for k, v in re_param.findall(lastline):
v = v[1:-1] if v[0] == '"' and v[-1] == '"' else v
m = re_imagesize.match(v) m = re_imagesize.match(v)
if m is not None: if m is not None:
res[k+"-1"] = m.group(1) res[k+"-1"] = m.group(1)
@ -286,10 +287,59 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
return res
- def connect_paste(button, paste_fields, input_comp, jsfunc=None):
+ settings_map = {}
infotext_to_setting_name_mapping = [
('Clip skip', 'CLIP_stop_at_last_layers', ),
('Conditional mask weight', 'inpainting_mask_weight'),
('Model hash', 'sd_model_checkpoint'),
('ENSD', 'eta_noise_seed_delta'),
('Noise multiplier', 'initial_noise_multiplier'),
('Eta', 'eta_ancestral'),
('Eta DDIM', 'eta_ddim'),
('Discard penultimate sigma', 'always_discard_next_to_last_sigma'),
('UniPC variant', 'uni_pc_variant'),
('UniPC skip type', 'uni_pc_skip_type'),
('UniPC order', 'uni_pc_order'),
('UniPC lower order final', 'uni_pc_lower_order_final'),
]
def create_override_settings_dict(text_pairs):
"""creates processing's override_settings parameters from gradio's multiselect
Example input:
['Clip skip: 2', 'Model hash: e6e99610c4', 'ENSD: 31337']
Example output:
{'CLIP_stop_at_last_layers': 2, 'sd_model_checkpoint': 'e6e99610c4', 'eta_noise_seed_delta': 31337}
"""
res = {}
params = {}
for pair in text_pairs:
k, v = pair.split(":", maxsplit=1)
params[k] = v.strip()
for param_name, setting_name in infotext_to_setting_name_mapping:
value = params.get(param_name, None)
if value is None:
continue
res[setting_name] = shared.opts.cast_value(setting_name, value)
return res
def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname):
def paste_func(prompt):
    if not prompt and not shared.cmd_opts.hide_ui_dir_config:
-         filename = os.path.join(script_path, "params.txt")
+         filename = os.path.join(data_path, "params.txt")
        if os.path.exists(filename):
            with open(filename, "r", encoding="utf8") as file:
                prompt = file.read()
@ -323,11 +373,42 @@ def connect_paste(button, paste_fields, input_comp, jsfunc=None):
return res
if override_settings_component is not None:
def paste_settings(params):
vals = {}
for param_name, setting_name in infotext_to_setting_name_mapping:
v = params.get(param_name, None)
if v is None:
continue
if setting_name == "sd_model_checkpoint" and shared.opts.disable_weights_auto_swap:
continue
v = shared.opts.cast_value(setting_name, v)
current_value = getattr(shared.opts, setting_name, None)
if v == current_value:
continue
vals[param_name] = v
vals_pairs = [f"{k}: {v}" for k, v in vals.items()]
return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=len(vals_pairs) > 0)
paste_fields = paste_fields + [(override_settings_component, paste_settings)]
button.click(
    fn=paste_func,
-     _js=jsfunc,
    inputs=[input_comp],
    outputs=[x[0] for x in paste_fields],
)
button.click(
fn=None,
_js=f"recalculate_prompts_{tabname}",
inputs=[],
outputs=[],
)
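Editor's note: under the new API, callers no longer go through bind_buttons/run_bind; they describe a paste button declaratively and register it, and connect_paste_params_buttons() wires everything up once the UI is assembled. A hedged usage sketch, assuming it runs inside the webui codebase; the two gradio components are placeholders, not real webui widgets:

import gradio as gr
from modules import generation_parameters_copypaste as params_copypaste

# Placeholder components purely for illustration; in the webui these are the
# actual txt2img/img2img widgets created elsewhere.
with gr.Blocks():
    my_infotext_box = gr.Textbox()
    my_paste_button = gr.Button("Paste")

params_copypaste.register_paste_params_button(params_copypaste.ParamBinding(
    paste_button=my_paste_button,           # button that triggers the paste
    tabname="txt2img",                      # destination tab whose fields receive values
    source_text_component=my_infotext_box,  # textbox holding the generation parameters
))
# connect_paste_params_buttons() is called once during UI construction to bind
# every registered ParamBinding to its destination fields.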

View File

@ -6,12 +6,11 @@ import facexlib
import gfpgan
import modules.face_restoration
- from modules import shared, devices, modelloader
+ from modules import paths, shared, devices, modelloader
- from modules.paths import models_path
model_dir = "GFPGAN"
user_path = None
- model_path = os.path.join(models_path, model_dir)
+ model_path = os.path.join(paths.models_path, model_dir)
model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
have_gfpgan = False
loaded_gfpgan_model = None

View File

@ -4,8 +4,11 @@ import os.path
import filelock
+ from modules import shared
+ from modules.paths import data_path
- cache_filename = "cache.json"
+ cache_filename = os.path.join(data_path, "cache.json")
cache_data = None
@ -66,6 +69,9 @@ def sha256(filename, title):
if sha256_value is not None:
    return sha256_value
+ if shared.cmd_opts.no_hashing:
+     return None
print(f"Calculating sha256 for {filename}: ", end='')
sha256_value = calculate_sha256(filename)
print(f"{sha256_value}")

View File

@ -307,12 +307,12 @@ class Hypernetwork:
def shorthash(self):
    sha256 = hashes.sha256(self.filename, f'hypernet/{self.name}')
-     return sha256[0:10]
+     return sha256[0:10] if sha256 else None
def list_hypernetworks(path):
    res = {}
-     for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True)):
+     for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True), key=str.lower):
        name = os.path.splitext(os.path.basename(filename))[0]
        # Prevent a hypothetical "None.pt" from being listed.
        if name != "None":
@ -380,8 +380,8 @@ def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None):
layer.hyper_k = hypernetwork_layers[0]
layer.hyper_v = hypernetwork_layers[1]
- context_k = hypernetwork_layers[0](context_k)
- context_v = hypernetwork_layers[1](context_v)
+ context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k)))
+ context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v)))
return context_k, context_v
@ -496,7 +496,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None,
shared.reload_hypernetworks()
- def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
    # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
    from modules import images
@ -554,7 +554,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
pin_memory = shared.opts.pin_memory
- ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
if shared.opts.save_training_settings_to_txt:
    saved_params = dict(
@ -640,13 +640,19 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
with devices.autocast():
    x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
+     if use_weight:
+         w = batch.weight.to(devices.device, non_blocking=pin_memory)
    if tag_drop_out != 0 or shuffle_tags:
        shared.sd_model.cond_stage_model.to(devices.device)
        c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
        shared.sd_model.cond_stage_model.to(devices.cpu)
    else:
        c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
-     loss = shared.sd_model(x, c)[0] / gradient_step
+     if use_weight:
+         loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step
+         del w
+     else:
+         loss = shared.sd_model.forward(x, c)[0] / gradient_step
    del x
    del c

View File

@ -16,8 +16,9 @@ from PIL import Image, ImageFont, ImageDraw, PngImagePlugin
from fonts.ttf import Roboto
import string
import json
+ import hashlib
- from modules import sd_samplers, shared, script_callbacks
+ from modules import sd_samplers, shared, script_callbacks, errors
from modules.shared import opts, cmd_opts
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
@ -36,6 +37,8 @@ def image_grid(imgs, batch_size=1, rows=None):
else:
    rows = math.sqrt(len(imgs))
    rows = round(rows)
+ if rows > len(imgs):
+     rows = len(imgs)
cols = math.ceil(len(imgs) / rows)
@ -128,7 +131,7 @@ class GridAnnotation:
self.size = None
- def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
+ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
    def wrap(drawing, text, font, line_length):
        lines = ['']
        for word in text.split():
@ -192,32 +195,35 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts):
line.allowed_width = allowed_width
hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
- ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in
-                     ver_texts]
+ ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts]
- pad_top = max(hor_text_heights) + line_spacing * 2
+ pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2
- result = Image.new("RGB", (im.width + pad_left, im.height + pad_top), "white")
- result.paste(im, (pad_left, pad_top))
+ result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), "white")
+ for row in range(rows):
+     for col in range(cols):
+         cell = im.crop((width * col, height * row, width * (col+1), height * (row+1)))
+         result.paste(cell, (pad_left + (width + margin) * col, pad_top + (height + margin) * row))
d = ImageDraw.Draw(result)
for col in range(cols):
-     x = pad_left + width * col + width / 2
+     x = pad_left + (width + margin) * col + width / 2
    y = pad_top / 2 - hor_text_heights[col] / 2
    draw_texts(d, x, y, hor_texts[col], fnt, fontsize)
for row in range(rows):
    x = pad_left / 2
-     y = pad_top + height * row + height / 2 - ver_text_heights[row] / 2
+     y = pad_top + (height + margin) * row + height / 2 - ver_text_heights[row] / 2
    draw_texts(d, x, y, ver_texts[row], fnt, fontsize)
return result
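Editor's note: with the new margin parameter the annotated grid is reassembled cell by cell, so the output canvas grows by margin pixels between cells. A small check of that size arithmetic with toy numbers (cell size, padding, and margin are made up):

# width/height are the cell size, pad_left/pad_top hold the annotations.
width, height, rows, cols = 512, 512, 2, 3
pad_left, pad_top, margin = 150, 60, 4

canvas_w = width * cols + pad_left + margin * (cols - 1)   # im.width + pad_left + margin*(cols-1)
canvas_h = height * rows + pad_top + margin * (rows - 1)   # im.height + pad_top + margin*(rows-1)
print(canvas_w, canvas_h)  # 1694 1088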
- def draw_prompt_matrix(im, width, height, all_prompts):
+ def draw_prompt_matrix(im, width, height, all_prompts, margin=0):
    prompts = all_prompts[1:]
    boundary = math.ceil(len(prompts) / 2)
@ -227,7 +233,7 @@ def draw_prompt_matrix(im, width, height, all_prompts):
hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
- return draw_grid_annotations(im, width, height, hor_texts, ver_texts)
+ return draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin)
def resize_image(resize_mode, im, width, height, upscaler_name=None):
@ -255,9 +261,12 @@ def resize_image(resize_mode, im, width, height, upscaler_name=None):
if scale > 1.0:
    upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name]
-     assert len(upscalers) > 0, f"could not find upscaler named {upscaler_name}"
-     upscaler = upscalers[0]
+     if len(upscalers) == 0:
+         upscaler = shared.sd_upscalers[0]
+         print(f"could not find upscaler named {upscaler_name or '<empty string>'}, using {upscaler.name} as a fallback")
+     else:
+         upscaler = upscalers[0]
    im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
    if im.width != w or im.height != h:
@ -338,6 +347,7 @@ class FilenameGenerator:
'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
'datetime': lambda self, *args: self.datetime(*args),  # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
+ 'prompt_hash': lambda self: hashlib.sha256(self.prompt.encode()).hexdigest()[0:8],
'prompt': lambda self: sanitize_filename_part(self.prompt),
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
@ -546,8 +556,10 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
elif extension.lower() in (".jpg", ".jpeg", ".webp"):
    if image_to_save.mode == 'RGBA':
        image_to_save = image_to_save.convert("RGB")
+     elif image_to_save.mode == 'I;16':
+         image_to_save = image_to_save.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L")
-     image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality)
+     image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless)
    if opts.enable_pnginfo and info is not None:
        exif_bytes = piexif.dump({
@ -564,21 +576,28 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
os.replace(temp_file_path, filename_without_extension + extension)
fullfn_without_extension, extension = os.path.splitext(params.filename)
+ if hasattr(os, 'statvfs'):
+     max_name_len = os.statvfs(path).f_namemax
+     fullfn_without_extension = fullfn_without_extension[:max_name_len - max(4, len(extension))]
+     params.filename = fullfn_without_extension + extension
+     fullfn = params.filename
_atomically_save_image(image, fullfn_without_extension, extension)
image.already_saved_as = fullfn
- target_side_length = 4000
- oversize = image.width > target_side_length or image.height > target_side_length
- if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > 4 * 1024 * 1024):
+ oversize = image.width > opts.target_side_length or image.height > opts.target_side_length
+ if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024):
    ratio = image.width / image.height
    if oversize and ratio > 1:
-         image = image.resize((target_side_length, image.height * target_side_length // image.width), LANCZOS)
+         image = image.resize((round(opts.target_side_length), round(image.height * opts.target_side_length / image.width)), LANCZOS)
    elif oversize:
-         image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
+         image = image.resize((round(image.width * opts.target_side_length / image.height), round(opts.target_side_length)), LANCZOS)
-     _atomically_save_image(image, fullfn_without_extension, ".jpg")
+     try:
+         _atomically_save_image(image, fullfn_without_extension, ".jpg")
+     except Exception as e:
+         errors.display(e, "saving image as downscaled JPG")
if opts.save_txt and info is not None:
    txt_fullfn = f"{fullfn_without_extension}.txt"
@ -629,6 +648,8 @@ Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}
def image_data(data):
+     import gradio as gr
    try:
        image = Image.open(io.BytesIO(data))
        textinfo, _ = read_info_from_image(image)
@ -644,7 +665,7 @@ def image_data(data):
except Exception:
    pass
- return '', None
+ return gr.update(), None
def flatten(img, bgcolor):

View File

@ -7,6 +7,7 @@ import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops
from modules import devices, sd_samplers
+ from modules.generation_parameters_copypaste import create_override_settings_dict
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
import modules.shared as shared
@ -16,11 +17,18 @@ import modules.images as images
import modules.scripts
- def process_batch(p, input_dir, output_dir, args):
+ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
    processing.fix_seed(p)
    images = shared.listfiles(input_dir)
+     is_inpaint_batch = False
+     if inpaint_mask_dir:
+         inpaint_masks = shared.listfiles(inpaint_mask_dir)
+         is_inpaint_batch = len(inpaint_masks) > 0
+     if is_inpaint_batch:
+         print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
    print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
    save_normally = output_dir == ''
@ -43,6 +51,15 @@ def process_batch(p, input_dir, output_dir, args):
img = ImageOps.exif_transpose(img)
p.init_images = [img] * p.batch_size
+ if is_inpaint_batch:
+     # try to find corresponding mask for an image using simple filename matching
+     mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
+     # if not found use first one ("same mask for all images" use-case)
+     if not mask_image_path in inpaint_masks:
+         mask_image_path = inpaint_masks[0]
+     mask_image = Image.open(mask_image_path)
+     p.image_mask = mask_image
proc = modules.scripts.scripts_img2img.run(p, *args)
if proc is None:
    proc = process_images(p)
@ -56,10 +73,14 @@ def process_batch(p, input_dir, output_dir, args):
if not save_normally:
    os.makedirs(output_dir, exist_ok=True)
+     if processed_image.mode == 'RGBA':
+         processed_image = processed_image.convert("RGB")
    processed_image.save(os.path.join(output_dir, filename))
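Editor's note: the inpaint-batch addition pairs each input image with a mask by file name and falls back to the first mask when no match exists. A standalone sketch of that matching rule; the paths below are made up and assume POSIX-style separators:

import os

def pick_mask(image_path, inpaint_masks, inpaint_mask_dir):
    # Same rule as process_batch above: prefer a mask with the same basename,
    # otherwise reuse the first mask for every image.
    candidate = os.path.join(inpaint_mask_dir, os.path.basename(image_path))
    return candidate if candidate in inpaint_masks else inpaint_masks[0]

masks = ["masks/cat.png", "masks/dog.png"]
print(pick_mask("inputs/cat.png", masks, "masks"))   # masks/cat.png
print(pick_mask("inputs/bird.png", masks, "masks"))  # masks/cat.png (fallback)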
- def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):
+ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+     override_settings = create_override_settings_dict(override_settings_texts)
    is_batch = mode == 5
    if mode == 0:  # img2img
@ -123,9 +144,11 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
inpainting_fill=inpainting_fill,
resize_mode=resize_mode,
denoising_strength=denoising_strength,
+ image_cfg_scale=image_cfg_scale,
inpaint_full_res=inpaint_full_res,
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
+ override_settings=override_settings,
)
p.scripts = modules.scripts.scripts_txt2img
@ -134,12 +157,13 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
if shared.cmd_opts.enable_console_prompts:
    print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
- p.extra_generation_params["Mask blur"] = mask_blur
+ if mask:
+     p.extra_generation_params["Mask blur"] = mask_blur
if is_batch:
    assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
-     process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)
+     process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)
    processed = Processed(p, [], p.seed, "")
else:

View File

@ -12,7 +12,7 @@ from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
import modules.shared as shared
- from modules import devices, paths, lowvram, modelloader, errors
+ from modules import devices, paths, shared, lowvram, modelloader, errors
blip_image_eval_size = 384
clip_model_name = 'ViT-L/14'

View File

@ -55,12 +55,12 @@ def setup_for_low_vram(sd_model, use_medvram):
if hasattr(sd_model.cond_stage_model, 'model'):
    sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model
- # remove four big modules, cond, first_stage, depth (if applicable), and unet from the model and then
+ # remove several big modules: cond, first_stage, depth/embedder (if applicable), and unet from the model and then
# send the model to GPU. Then put modules back. the modules will be in CPU.
- stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), sd_model.model
- sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = None, None, None, None
+ stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), getattr(sd_model, 'embedder', None), sd_model.model
+ sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = None, None, None, None, None
sd_model.to(devices.device)
- sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = stored
+ sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = stored
# register hooks for those the first three models
sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
@ -69,6 +69,8 @@ def setup_for_low_vram(sd_model, use_medvram):
sd_model.first_stage_model.decode = first_stage_model_decode_wrap
if sd_model.depth_model:
    sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
+ if sd_model.embedder:
+     sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
if hasattr(sd_model.cond_stage_model, 'model'):
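Editor's note: the lowvram scheme keeps large submodules on the CPU and relies on register_forward_pre_hook to move whichever module is about to run onto the GPU (the webui version additionally swaps the previously active module back to the CPU). A minimal self-contained sketch of that mechanism; the module and device choice are illustrative:

import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def send_me_to_gpu(module, _inputs):
    # Called right before module.forward; moving the module here means it only
    # occupies GPU memory while it is actually being used.
    module.to(device)

small_net = nn.Linear(8, 8)          # stays on CPU until its forward runs
small_net.register_forward_pre_hook(send_me_to_gpu)
out = small_net(torch.randn(1, 8).to(device))
print(out.device)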

modules/mac_specific.py (new file, 59 lines)
View File

@ -0,0 +1,59 @@
import torch
import platform
from modules import paths
from modules.sd_hijack_utils import CondFunc
from packaging import version
# has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
# check `getattr` and try it for compatibility
def check_for_mps() -> bool:
if not getattr(torch, 'has_mps', False):
return False
try:
torch.zeros(1).to(torch.device("mps"))
return True
except Exception:
return False
has_mps = check_for_mps()
# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
def cumsum_fix(input, cumsum_func, *args, **kwargs):
if input.device.type == 'mps':
output_dtype = kwargs.get('dtype', input.dtype)
if output_dtype == torch.int64:
return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
elif output_dtype == torch.bool or cumsum_needs_int_fix and (output_dtype == torch.int8 or output_dtype == torch.int16):
return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64)
return cumsum_func(input, *args, **kwargs)
if has_mps:
# MPS fix for randn in torchsde
CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps')
if platform.mac_ver()[0].startswith("13.2."):
# MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)
if version.parse(torch.__version__) < version.parse("1.13"):
# PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
# MPS workaround for https://github.com/pytorch/pytorch/issues/79383
CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs),
lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps'))
# MPS workaround for https://github.com/pytorch/pytorch/issues/80800
CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs),
lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps')
# MPS workaround for https://github.com/pytorch/pytorch/issues/90532
CondFunc('torch.Tensor.numpy', lambda orig_func, self, *args, **kwargs: orig_func(self.detach(), *args, **kwargs), lambda _, self, *args, **kwargs: self.requires_grad)
elif version.parse(torch.__version__) > version.parse("1.13.1"):
cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
cumsum_fix_func = lambda orig_func, input, *args, **kwargs: cumsum_fix(input, orig_func, *args, **kwargs)
CondFunc('torch.cumsum', cumsum_fix_func, None)
CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None)
CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None)
if version.parse(torch.__version__) == version.parse("2.0"):
# MPS workaround for https://github.com/pytorch/pytorch/issues/96113
CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda *args, **kwargs: len(args) == 6)
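Editor's note: mac_specific.py expresses every workaround through CondFunc, which swaps a callable at a dotted path for a wrapper that only applies the fix when a predicate matches. A rough stand-alone approximation of that idea; this is a simplified re-implementation for illustration, not the actual modules.sd_hijack_utils code:

# Simplified illustration of the CondFunc pattern: wrap an existing function and
# only apply the substitute when the condition holds; otherwise defer to the original.
def cond_func(orig_func, sub_func, predicate):
    def wrapper(*args, **kwargs):
        if predicate is None or predicate(orig_func, *args, **kwargs):
            return sub_func(orig_func, *args, **kwargs)
        return orig_func(*args, **kwargs)
    return wrapper

def orig(x):
    return x * 2

# Hypothetical fix that only activates for odd inputs.
patched = cond_func(orig, lambda orig_fn, x: orig_fn(x) + 1, lambda orig_fn, x: x % 2 == 1)
print(patched(2), patched(3))  # 4 7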

View File

@ -23,12 +23,16 @@ class MemUsageMonitor(threading.Thread):
self.data = defaultdict(int)
try:
-     torch.cuda.mem_get_info()
+     self.cuda_mem_get_info()
    torch.cuda.memory_stats(self.device)
except Exception as e:  # AMD or whatever
    print(f"Warning: caught exception '{e}', memory monitor disabled")
    self.disabled = True
+ def cuda_mem_get_info(self):
+     index = self.device.index if self.device.index is not None else torch.cuda.current_device()
+     return torch.cuda.mem_get_info(index)
def run(self):
    if self.disabled:
        return
@ -43,10 +47,10 @@ class MemUsageMonitor(threading.Thread):
self.run_flag.clear()
continue
- self.data["min_free"] = torch.cuda.mem_get_info()[0]
+ self.data["min_free"] = self.cuda_mem_get_info()[0]
while self.run_flag.is_set():
-     free, total = torch.cuda.mem_get_info()  # calling with self.device errors, torch bug?
+     free, total = self.cuda_mem_get_info()
    self.data["min_free"] = min(self.data["min_free"], free)
    time.sleep(1 / self.opts.memmon_poll_rate)
@ -70,7 +74,7 @@ class MemUsageMonitor(threading.Thread):
def read(self):
    if not self.disabled:
-         free, total = torch.cuda.mem_get_info()
+         free, total = self.cuda_mem_get_info()
        self.data["free"] = free
        self.data["total"] = total

View File

@ -4,9 +4,8 @@ import shutil
import importlib
from urllib.parse import urlparse
- from basicsr.utils.download_util import load_file_from_url
from modules import shared
- from modules.upscaler import Upscaler
+ from modules.upscaler import Upscaler, UpscalerLanczos, UpscalerNearest, UpscalerNone
from modules.paths import script_path, models_path
@ -45,6 +44,9 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
full_path = file
if os.path.isdir(full_path):
    continue
+ if os.path.islink(full_path) and not os.path.exists(full_path):
+     print(f"Skipping broken symlink: {full_path}")
+     continue
if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]):
    continue
if len(ext_filter) != 0:
@ -56,6 +58,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
if model_url is not None and len(output) == 0:
    if download_name is not None:
+         from basicsr.utils.download_util import load_file_from_url
        dl = load_file_from_url(model_url, model_path, True, download_name)
        output.append(dl)
    else:
@ -166,4 +169,8 @@ def load_upscalers():
scaler = cls(commandline_options.get(cmd_name, None))
datas += scaler.scalers
- shared.sd_upscalers = datas
+ shared.sd_upscalers = sorted(
+     datas,
+     # Special case for UpscalerNone keeps it at the beginning of the list.
+     key=lambda x: x.name.lower() if not isinstance(x.scaler, (UpscalerNone, UpscalerLanczos, UpscalerNearest)) else ""
+ )
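Editor's note: the upscaler list is now sorted case-insensitively by name, with the None/Lanczos/Nearest built-ins keyed to an empty string so they stay at the front. The same trick in isolation; the names below are examples, not the actual registered upscalers:

# Built-in entries get an empty sort key, so they sort before everything else
# (and keep their original relative order); the rest sort case-insensitively.
builtin = {"None", "Lanczos", "Nearest"}
names = ["R-ESRGAN 4x+", "None", "ldsr", "Lanczos", "ESRGAN_4x", "Nearest"]
ordered = sorted(names, key=lambda n: "" if n in builtin else n.lower())
print(ordered)  # ['None', 'Lanczos', 'Nearest', 'ESRGAN_4x', 'ldsr', 'R-ESRGAN 4x+']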

File diff suppressed because it is too large

View File

@ -0,0 +1 @@
from .sampler import UniPCSampler

View File

@ -0,0 +1,100 @@
"""SAMPLING ONLY."""
import torch
from .uni_pc import NoiseScheduleVP, model_wrapper, UniPC
from modules import shared, devices
class UniPCSampler(object):
def __init__(self, model, **kwargs):
super().__init__()
self.model = model
to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
self.before_sample = None
self.after_sample = None
self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != devices.device:
attr = attr.to(devices.device)
setattr(self, name, attr)
def set_hooks(self, before_sample, after_sample, after_update):
self.before_sample = before_sample
self.after_sample = after_sample
self.after_update = after_update
@torch.no_grad()
def sample(self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.,
mask=None,
x0=None,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.,
unconditional_conditioning=None,
# this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
**kwargs
):
if conditioning is not None:
if isinstance(conditioning, dict):
ctmp = conditioning[list(conditioning.keys())[0]]
while isinstance(ctmp, list): ctmp = ctmp[0]
cbs = ctmp.shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
elif isinstance(conditioning, list):
for ctmp in conditioning:
if ctmp.shape[0] != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
else:
if conditioning.shape[0] != batch_size:
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
# print(f'Data shape for UniPC sampling is {size}')
device = self.model.betas.device
if x_T is None:
img = torch.randn(size, device=device)
else:
img = x_T
ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
# SD 1.X is "noise", SD 2.X is "v"
model_type = "v" if self.model.parameterization == "v" else "noise"
model_fn = model_wrapper(
lambda x, t, c: self.model.apply_model(x, t, c),
ns,
model_type=model_type,
guidance_type="classifier-free",
#condition=conditioning,
#unconditional_condition=unconditional_conditioning,
guidance_scale=unconditional_guidance_scale,
)
uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant, condition=conditioning, unconditional_condition=unconditional_conditioning, before_sample=self.before_sample, after_sample=self.after_sample, after_update=self.after_update)
x = uni_pc.sample(img, steps=S, skip_type=shared.opts.uni_pc_skip_type, method="multistep", order=shared.opts.uni_pc_order, lower_order_final=shared.opts.uni_pc_lower_order_final)
return x.to(device), None

View File

@ -0,0 +1,857 @@
import torch
import torch.nn.functional as F
import math
from tqdm.auto import trange
class NoiseScheduleVP:
def __init__(
self,
schedule='discrete',
betas=None,
alphas_cumprod=None,
continuous_beta_0=0.1,
continuous_beta_1=20.,
):
"""Create a wrapper class for the forward SDE (VP type).
***
Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.
We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.
***
The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
log_alpha_t = self.marginal_log_mean_coeff(t)
sigma_t = self.marginal_std(t)
lambda_t = self.marginal_lambda(t)
Moreover, as lambda(t) is an invertible function, we also support its inverse function:
t = self.inverse_lambda(lambda_t)
===============================================================
We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
1. For discrete-time DPMs:
For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
t_i = (i + 1) / N
e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
Args:
betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
**Important**: Please pay special attention to the argument `alphas_cumprod`:
The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
alpha_{t_n} = \sqrt{\hat{alpha_n}},
and
log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
2. For continuous-time DPMs:
We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
schedule are the default settings in DDPM and improved-DDPM:
Args:
beta_min: A `float` number. The smallest beta for the linear schedule.
beta_max: A `float` number. The largest beta for the linear schedule.
cosine_s: A `float` number. The hyperparameter in the cosine schedule.
cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
T: A `float` number. The ending time of the forward process.
===============================================================
Args:
schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
'linear' or 'cosine' for continuous-time DPMs.
Returns:
A wrapper object of the forward SDE (VP type).
===============================================================
Example:
# For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
>>> ns = NoiseScheduleVP('discrete', betas=betas)
# For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
>>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
# For continuous-time DPMs (VPSDE), linear schedule:
>>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
"""
if schedule not in ['discrete', 'linear', 'cosine']:
raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))
self.schedule = schedule
if schedule == 'discrete':
if betas is not None:
log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
else:
assert alphas_cumprod is not None
log_alphas = 0.5 * torch.log(alphas_cumprod)
self.total_N = len(log_alphas)
self.T = 1.
self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
self.log_alpha_array = log_alphas.reshape((1, -1,))
else:
self.total_N = 1000
self.beta_0 = continuous_beta_0
self.beta_1 = continuous_beta_1
self.cosine_s = 0.008
self.cosine_beta_max = 999.
self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
self.schedule = schedule
if schedule == 'cosine':
# For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
# Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
self.T = 0.9946
else:
self.T = 1.
def marginal_log_mean_coeff(self, t):
"""
Compute log(alpha_t) of a given continuous-time label t in [0, T].
"""
if self.schedule == 'discrete':
return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
elif self.schedule == 'linear':
return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
elif self.schedule == 'cosine':
log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
return log_alpha_t
def marginal_alpha(self, t):
"""
Compute alpha_t of a given continuous-time label t in [0, T].
"""
return torch.exp(self.marginal_log_mean_coeff(t))
def marginal_std(self, t):
"""
Compute sigma_t of a given continuous-time label t in [0, T].
"""
return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
def marginal_lambda(self, t):
"""
Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
"""
log_mean_coeff = self.marginal_log_mean_coeff(t)
log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
return log_mean_coeff - log_std
def inverse_lambda(self, lamb):
"""
Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
"""
if self.schedule == 'linear':
tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
Delta = self.beta_0**2 + tmp
return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
elif self.schedule == 'discrete':
log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
return t.reshape((-1,))
else:
log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
t = t_fn(log_alpha)
return t
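The class above can be exercised in isolation, which makes the half-logSNR relationships in the docstring concrete. A minimal check, assuming a simple linear beta schedule (the schedule values are illustrative, not the ones Stable Diffusion uses):
```
import torch

betas = torch.linspace(1e-4, 2e-2, 1000)            # toy linear beta schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # \hat{alpha}_n in DDPM notation

ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)

t = torch.tensor([0.5])
alpha_t, sigma_t = ns.marginal_alpha(t), ns.marginal_std(t)
lambda_t = ns.marginal_lambda(t)

# lambda_t is the half-logSNR, and inverse_lambda maps it back to (approximately) t
assert torch.allclose(lambda_t, torch.log(alpha_t) - torch.log(sigma_t), atol=1e-5)
assert torch.allclose(ns.inverse_lambda(lambda_t), t, atol=1e-3)
```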
def model_wrapper(
model,
noise_schedule,
model_type="noise",
model_kwargs={},
guidance_type="uncond",
#condition=None,
#unconditional_condition=None,
guidance_scale=1.,
classifier_fn=None,
classifier_kwargs={},
):
"""Create a wrapper function for the noise prediction model.
DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
first wrap the model function into a noise prediction model that accepts continuous time as input.
We support four types of the diffusion model by setting `model_type`:
1. "noise": noise prediction model. (Trained by predicting noise).
2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
3. "v": velocity prediction model. (Trained by predicting the velocity).
The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
[1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
arXiv preprint arXiv:2202.00512 (2022).
[2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
arXiv preprint arXiv:2210.02303 (2022).
4. "score": marginal score function. (Trained by denoising score matching).
Note that the score function and the noise prediction model follow a simple relationship:
```
noise(x_t, t) = -sigma_t * score(x_t, t)
```
We support three types of guided sampling by DPMs by setting `guidance_type`:
1. "uncond": unconditional sampling by DPMs.
The input `model` has the following format:
``
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
``
2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
The input `model` has the following format:
``
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
``
The input `classifier_fn` has the following format:
``
classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
``
[3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
The input `model` has the following format:
``
model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
``
And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
[4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
arXiv preprint arXiv:2207.12598 (2022).
The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
or continuous-time labels (i.e. epsilon to T).
We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
``
def model_fn(x, t_continuous) -> noise:
t_input = get_model_input_time(t_continuous)
return noise_pred(model, x, t_input, **model_kwargs)
``
where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
===============================================================
Args:
model: A diffusion model with the corresponding format described above.
noise_schedule: A noise schedule object, such as NoiseScheduleVP.
model_type: A `str`. The parameterization type of the diffusion model.
"noise" or "x_start" or "v" or "score".
model_kwargs: A `dict`. A dict for the other inputs of the model function.
guidance_type: A `str`. The type of the guidance for sampling.
"uncond" or "classifier" or "classifier-free".
condition: A pytorch tensor. The condition for the guided sampling.
Only used for "classifier" or "classifier-free" guidance type.
unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
Only used for "classifier-free" guidance type.
guidance_scale: A `float`. The scale for the guided sampling.
classifier_fn: A classifier function. Only used for the classifier guidance.
classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
Returns:
A noise prediction model that accepts the noised data and the continuous time as the inputs.
"""
def get_model_input_time(t_continuous):
"""
Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
For continuous-time DPMs, we just use `t_continuous`.
"""
if noise_schedule.schedule == 'discrete':
return (t_continuous - 1. / noise_schedule.total_N) * 1000.
else:
return t_continuous
def noise_pred_fn(x, t_continuous, cond=None):
if t_continuous.reshape((-1,)).shape[0] == 1:
t_continuous = t_continuous.expand((x.shape[0]))
t_input = get_model_input_time(t_continuous)
if cond is None:
output = model(x, t_input, None, **model_kwargs)
else:
output = model(x, t_input, cond, **model_kwargs)
if model_type == "noise":
return output
elif model_type == "x_start":
alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
dims = x.dim()
return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
elif model_type == "v":
alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
dims = x.dim()
return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
elif model_type == "score":
sigma_t = noise_schedule.marginal_std(t_continuous)
dims = x.dim()
return -expand_dims(sigma_t, dims) * output
def cond_grad_fn(x, t_input, condition):
"""
Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
"""
with torch.enable_grad():
x_in = x.detach().requires_grad_(True)
log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
return torch.autograd.grad(log_prob.sum(), x_in)[0]
def model_fn(x, t_continuous, condition, unconditional_condition):
"""
The noise prediction model function that is used for DPM-Solver.
"""
if t_continuous.reshape((-1,)).shape[0] == 1:
t_continuous = t_continuous.expand((x.shape[0]))
if guidance_type == "uncond":
return noise_pred_fn(x, t_continuous)
elif guidance_type == "classifier":
assert classifier_fn is not None
t_input = get_model_input_time(t_continuous)
cond_grad = cond_grad_fn(x, t_input, condition)
sigma_t = noise_schedule.marginal_std(t_continuous)
noise = noise_pred_fn(x, t_continuous)
return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
elif guidance_type == "classifier-free":
if guidance_scale == 1. or unconditional_condition is None:
return noise_pred_fn(x, t_continuous, cond=condition)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t_continuous] * 2)
if isinstance(condition, dict):
assert isinstance(unconditional_condition, dict)
c_in = dict()
for k in condition:
if isinstance(condition[k], list):
c_in[k] = [torch.cat([
unconditional_condition[k][i],
condition[k][i]]) for i in range(len(condition[k]))]
else:
c_in[k] = torch.cat([
unconditional_condition[k],
condition[k]])
elif isinstance(condition, list):
c_in = list()
assert isinstance(unconditional_condition, list)
for i in range(len(condition)):
c_in.append(torch.cat([unconditional_condition[i], condition[i]]))
else:
c_in = torch.cat([unconditional_condition, condition])
noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
return noise_uncond + guidance_scale * (noise - noise_uncond)
assert model_type in ["noise", "x_start", "v"]
assert guidance_type in ["uncond", "classifier", "classifier-free"]
return model_fn
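A hedged sketch of using `model_wrapper` directly with classifier-free guidance. The `toy_model` stands in for `model.apply_model(x, t, c)` from the call site earlier in this diff; its output is meaningless and only shape-correct.
```
import torch

betas = torch.linspace(1e-4, 2e-2, 1000)
ns = NoiseScheduleVP('discrete', alphas_cumprod=torch.cumprod(1.0 - betas, dim=0))

def toy_model(x, t, cond):
    # a real model would be a conditioned UNet; this just returns a zero noise prediction
    return torch.zeros_like(x)

model_fn = model_wrapper(
    toy_model,
    ns,
    model_type="noise",                 # epsilon-prediction parameterization
    guidance_type="classifier-free",
    guidance_scale=7.0,
)

x = torch.randn(2, 4, 8, 8)             # toy latent batch
t = torch.tensor([0.5])
cond = torch.randn(2, 77, 768)          # stand-in for prompt conditioning
uncond = torch.randn(2, 77, 768)        # stand-in for negative-prompt conditioning

eps = model_fn(x, t, cond, uncond)      # CFG-combined noise prediction, same shape as x
```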
class UniPC:
def __init__(
self,
model_fn,
noise_schedule,
predict_x0=True,
thresholding=False,
max_val=1.,
variant='bh1',
condition=None,
unconditional_condition=None,
before_sample=None,
after_sample=None,
after_update=None
):
"""Construct a UniPC.
We support both data_prediction and noise_prediction.
"""
self.model_fn_ = model_fn
self.noise_schedule = noise_schedule
self.variant = variant
self.predict_x0 = predict_x0
self.thresholding = thresholding
self.max_val = max_val
self.condition = condition
self.unconditional_condition = unconditional_condition
self.before_sample = before_sample
self.after_sample = after_sample
self.after_update = after_update
def dynamic_thresholding_fn(self, x0, t=None):
"""
The dynamic thresholding method.
"""
dims = x0.dim()
p = self.dynamic_thresholding_ratio
s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)
x0 = torch.clamp(x0, -s, s) / s
return x0
def model(self, x, t):
cond = self.condition
uncond = self.unconditional_condition
if self.before_sample is not None:
x, t, cond, uncond = self.before_sample(x, t, cond, uncond)
res = self.model_fn_(x, t, cond, uncond)
if self.after_sample is not None:
x, t, cond, uncond, res = self.after_sample(x, t, cond, uncond, res)
if isinstance(res, tuple):
# (None, pred_x0)
res = res[1]
return res
def noise_prediction_fn(self, x, t):
"""
Return the noise prediction model.
"""
return self.model(x, t)
def data_prediction_fn(self, x, t):
"""
Return the data prediction model (with thresholding).
"""
noise = self.noise_prediction_fn(x, t)
dims = x.dim()
alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
if self.thresholding:
p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
x0 = torch.clamp(x0, -s, s) / s
return x0
def model_fn(self, x, t):
"""
Convert the model to the noise prediction model or the data prediction model.
"""
if self.predict_x0:
return self.data_prediction_fn(x, t)
else:
return self.noise_prediction_fn(x, t)
def get_time_steps(self, skip_type, t_T, t_0, N, device):
"""Compute the intermediate time steps for sampling.
"""
if skip_type == 'logSNR':
lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
return self.noise_schedule.inverse_lambda(logSNR_steps)
elif skip_type == 'time_uniform':
return torch.linspace(t_T, t_0, N + 1).to(device)
elif skip_type == 'time_quadratic':
t_order = 2
t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
return t
else:
raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
"""
Get the order of each step for sampling by the singlestep DPM-Solver.
"""
if order == 3:
K = steps // 3 + 1
if steps % 3 == 0:
orders = [3,] * (K - 2) + [2, 1]
elif steps % 3 == 1:
orders = [3,] * (K - 1) + [1]
else:
orders = [3,] * (K - 1) + [2]
elif order == 2:
if steps % 2 == 0:
K = steps // 2
orders = [2,] * K
else:
K = steps // 2 + 1
orders = [2,] * (K - 1) + [1]
elif order == 1:
K = steps
orders = [1,] * steps
else:
raise ValueError("'order' must be '1' or '2' or '3'.")
if skip_type == 'logSNR':
# To reproduce the results in DPM-Solver paper
timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
else:
timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), 0).to(device)]
return timesteps_outer, orders
def denoise_to_zero_fn(self, x, s):
"""
Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
"""
return self.data_prediction_fn(x, s)
def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, order, **kwargs):
if len(t.shape) == 0:
t = t.view(-1)
if 'bh' in self.variant:
return self.multistep_uni_pc_bh_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
else:
assert self.variant == 'vary_coeff'
return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
#print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
ns = self.noise_schedule
assert order <= len(model_prev_list)
# first compute rks
t_prev_0 = t_prev_list[-1]
lambda_prev_0 = ns.marginal_lambda(t_prev_0)
lambda_t = ns.marginal_lambda(t)
model_prev_0 = model_prev_list[-1]
sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
log_alpha_t = ns.marginal_log_mean_coeff(t)
alpha_t = torch.exp(log_alpha_t)
h = lambda_t - lambda_prev_0
rks = []
D1s = []
for i in range(1, order):
t_prev_i = t_prev_list[-(i + 1)]
model_prev_i = model_prev_list[-(i + 1)]
lambda_prev_i = ns.marginal_lambda(t_prev_i)
rk = (lambda_prev_i - lambda_prev_0) / h
rks.append(rk)
D1s.append((model_prev_i - model_prev_0) / rk)
rks.append(1.)
rks = torch.tensor(rks, device=x.device)
K = len(rks)
# build C matrix
C = []
col = torch.ones_like(rks)
for k in range(1, K + 1):
C.append(col)
col = col * rks / (k + 1)
C = torch.stack(C, dim=1)
if len(D1s) > 0:
D1s = torch.stack(D1s, dim=1) # (B, K)
C_inv_p = torch.linalg.inv(C[:-1, :-1])
A_p = C_inv_p
if use_corrector:
#print('using corrector')
C_inv = torch.linalg.inv(C)
A_c = C_inv
hh = -h if self.predict_x0 else h
h_phi_1 = torch.expm1(hh)
h_phi_ks = []
factorial_k = 1
h_phi_k = h_phi_1
for k in range(1, K + 2):
h_phi_ks.append(h_phi_k)
h_phi_k = h_phi_k / hh - 1 / factorial_k
factorial_k *= (k + 1)
model_t = None
if self.predict_x0:
x_t_ = (
sigma_t / sigma_prev_0 * x
- alpha_t * h_phi_1 * model_prev_0
)
# now predictor
x_t = x_t_
if len(D1s) > 0:
# compute the residuals for predictor
for k in range(K - 1):
x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
# now corrector
if use_corrector:
model_t = self.model_fn(x_t, t)
D1_t = (model_t - model_prev_0)
x_t = x_t_
k = 0
for k in range(K - 1):
x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
x_t = x_t - alpha_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
else:
log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
x_t_ = (
(torch.exp(log_alpha_t - log_alpha_prev_0)) * x
- (sigma_t * h_phi_1) * model_prev_0
)
# now predictor
x_t = x_t_
if len(D1s) > 0:
# compute the residuals for predictor
for k in range(K - 1):
x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
# now corrector
if use_corrector:
model_t = self.model_fn(x_t, t)
D1_t = (model_t - model_prev_0)
x_t = x_t_
k = 0
for k in range(K - 1):
x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
x_t = x_t - sigma_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
return x_t, model_t
def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order, x_t=None, use_corrector=True):
#print(f'using unified predictor-corrector with order {order} (solver type: B(h))')
ns = self.noise_schedule
assert order <= len(model_prev_list)
dims = x.dim()
# first compute rks
t_prev_0 = t_prev_list[-1]
lambda_prev_0 = ns.marginal_lambda(t_prev_0)
lambda_t = ns.marginal_lambda(t)
model_prev_0 = model_prev_list[-1]
sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
alpha_t = torch.exp(log_alpha_t)
h = lambda_t - lambda_prev_0
rks = []
D1s = []
for i in range(1, order):
t_prev_i = t_prev_list[-(i + 1)]
model_prev_i = model_prev_list[-(i + 1)]
lambda_prev_i = ns.marginal_lambda(t_prev_i)
rk = ((lambda_prev_i - lambda_prev_0) / h)[0]
rks.append(rk)
D1s.append((model_prev_i - model_prev_0) / rk)
rks.append(1.)
rks = torch.tensor(rks, device=x.device)
R = []
b = []
hh = -h[0] if self.predict_x0 else h[0]
h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1
h_phi_k = h_phi_1 / hh - 1
factorial_i = 1
if self.variant == 'bh1':
B_h = hh
elif self.variant == 'bh2':
B_h = torch.expm1(hh)
else:
raise NotImplementedError()
for i in range(1, order + 1):
R.append(torch.pow(rks, i - 1))
b.append(h_phi_k * factorial_i / B_h)
factorial_i *= (i + 1)
h_phi_k = h_phi_k / hh - 1 / factorial_i
R = torch.stack(R)
b = torch.tensor(b, device=x.device)
# now predictor
use_predictor = len(D1s) > 0 and x_t is None
if len(D1s) > 0:
D1s = torch.stack(D1s, dim=1) # (B, K)
if x_t is None:
# for order 2, we use a simplified version
if order == 2:
rhos_p = torch.tensor([0.5], device=b.device)
else:
rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
else:
D1s = None
if use_corrector:
#print('using corrector')
# for order 1, we use a simplified version
if order == 1:
rhos_c = torch.tensor([0.5], device=b.device)
else:
rhos_c = torch.linalg.solve(R, b)
model_t = None
if self.predict_x0:
x_t_ = (
expand_dims(sigma_t / sigma_prev_0, dims) * x
- expand_dims(alpha_t * h_phi_1, dims)* model_prev_0
)
if x_t is None:
if use_predictor:
pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
else:
pred_res = 0
x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * pred_res
if use_corrector:
model_t = self.model_fn(x_t, t)
if D1s is not None:
corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
else:
corr_res = 0
D1_t = (model_t - model_prev_0)
x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
else:
x_t_ = (
expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
- expand_dims(sigma_t * h_phi_1, dims) * model_prev_0
)
if x_t is None:
if use_predictor:
pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
else:
pred_res = 0
x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * pred_res
if use_corrector:
model_t = self.model_fn(x_t, t)
if D1s is not None:
corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
else:
corr_res = 0
D1_t = (model_t - model_prev_0)
x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
return x_t, model_t
def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
atol=0.0078, rtol=0.05, corrector=False,
):
t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
t_T = self.noise_schedule.T if t_start is None else t_start
device = x.device
if method == 'multistep':
assert steps >= order, "UniPC order must be < sampling steps"
timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
#print(f"Running UniPC Sampling with {timesteps.shape[0]} timesteps, order {order}")
assert timesteps.shape[0] - 1 == steps
with torch.no_grad():
vec_t = timesteps[0].expand((x.shape[0]))
model_prev_list = [self.model_fn(x, vec_t)]
t_prev_list = [vec_t]
# Init the first `order` values by lower order multistep DPM-Solver.
for init_order in range(1, order):
vec_t = timesteps[init_order].expand(x.shape[0])
x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, init_order, use_corrector=True)
if model_x is None:
model_x = self.model_fn(x, vec_t)
if self.after_update is not None:
self.after_update(x, model_x)
model_prev_list.append(model_x)
t_prev_list.append(vec_t)
for step in trange(order, steps + 1):
vec_t = timesteps[step].expand(x.shape[0])
if lower_order_final:
step_order = min(order, steps + 1 - step)
else:
step_order = order
#print('this step order:', step_order)
if step == steps:
#print('do not run corrector at the last step')
use_corrector = False
else:
use_corrector = True
x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, step_order, use_corrector=use_corrector)
if self.after_update is not None:
self.after_update(x, model_x)
for i in range(order - 1):
t_prev_list[i] = t_prev_list[i + 1]
model_prev_list[i] = model_prev_list[i + 1]
t_prev_list[-1] = vec_t
# We do not need to evaluate the final model value.
if step < steps:
if model_x is None:
model_x = self.model_fn(x, vec_t)
model_prev_list[-1] = model_x
else:
raise NotImplementedError()
if denoise_to_zero:
x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
return x
#############################################################
# other utility functions
#############################################################
def interpolate_fn(x, xp, yp):
"""
A piecewise linear function y = f(x), using xp and yp as keypoints.
We implement f(x) in a differentiable way (i.e. applicable for autograd).
The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
Args:
x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
yp: PyTorch tensor with shape [C, K].
Returns:
The function values f(x), with shape [N, C].
"""
N, K = x.shape[0], xp.shape[1]
all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
sorted_all_x, x_indices = torch.sort(all_x, dim=2)
x_idx = torch.argmin(x_indices, dim=2)
cand_start_idx = x_idx - 1
start_idx = torch.where(
torch.eq(x_idx, 0),
torch.tensor(1, device=x.device),
torch.where(
torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
),
)
end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
start_idx2 = torch.where(
torch.eq(x_idx, 0),
torch.tensor(0, device=x.device),
torch.where(
torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
),
)
y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
return cand
def expand_dims(v, dims):
"""
Expand the tensor `v` to `dims` dimensions.
Args:
`v`: a PyTorch tensor with shape [N].
`dims`: an `int`.
Returns:
a PyTorch tensor with shape [N, 1, 1, ..., 1] with `dims` dimensions in total.
"""
return v[(...,) + (None,)*(dims - 1)]
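Putting the three pieces together, here is a self-contained smoke test of the multistep UniPC solver on a toy model. The model predicts zero noise, so the result is not a meaningful sample; the sketch only shows how `NoiseScheduleVP`, `model_wrapper` and `UniPC` are wired, mirroring the call site at the top of this diff.
```
import torch

betas = torch.linspace(1e-4, 2e-2, 1000)
ns = NoiseScheduleVP('discrete', alphas_cumprod=torch.cumprod(1.0 - betas, dim=0))

model_fn = model_wrapper(
    lambda x, t, c: torch.zeros_like(x),  # stand-in for model.apply_model(x, t, c)
    ns,
    model_type="noise",
    guidance_type="classifier-free",
    guidance_scale=1.0,                   # scale 1.0 skips the unconditional branch
)

uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, variant='bh1',
               condition=torch.zeros(1, 77, 768), unconditional_condition=None)

x_T = torch.randn(1, 4, 8, 8)             # initial latent noise
x_0 = uni_pc.sample(x_T, steps=10, method="multistep", order=2,
                    skip_type="time_uniform", lower_order_final=True)
print(x_0.shape)                          # torch.Size([1, 4, 8, 8])
```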

View File

@ -1,10 +1,11 @@
import argparse
import os import os
import sys import sys
from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir
import modules.safe import modules.safe
script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
models_path = os.path.join(script_path, "models") # data_path = cmd_opts_pre.data
sys.path.insert(0, script_path) sys.path.insert(0, script_path)
# search for directory of stable diffusion in following places # search for directory of stable diffusion in following places

modules/paths_internal.py (new file, 22 lines)
View File

@ -0,0 +1,22 @@
"""this module defines internal paths used by program and is safe to import before dependencies are installed in launch.py"""
import argparse
import os
script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sd_configs_path = os.path.join(script_path, "configs")
sd_default_config = os.path.join(sd_configs_path, "v1-inference.yaml")
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
# Parse the --data-dir flag first so we can use it as a base for our other argument default values
parser_pre = argparse.ArgumentParser(add_help=False)
parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored",)
cmd_opts_pre = parser_pre.parse_known_args()[0]
data_path = cmd_opts_pre.data_dir
models_path = os.path.join(data_path, "models")
extensions_dir = os.path.join(data_path, "extensions")
extensions_builtin_dir = os.path.join(script_path, "extensions-builtin")
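Since this module only touches the standard library, it can be imported before any dependencies are installed, which is the point of splitting it out of modules/paths.py. A hedged example of the resulting layout when the webui is launched with a custom data directory (the path is hypothetical):
```
# with:  python launch.py --data-dir /mnt/sd-data
from modules.paths_internal import data_path, models_path, extensions_dir, extensions_builtin_dir

print(data_path)                # /mnt/sd-data
print(models_path)              # /mnt/sd-data/models
print(extensions_dir)           # /mnt/sd-data/extensions
print(extensions_builtin_dir)   # <webui root>/extensions-builtin (always next to the code)
```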

View File

@ -13,10 +13,11 @@ from skimage import exposure
 from typing import Any, Dict, List, Optional
 import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
 from modules.sd_hijack import model_hijack
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
+import modules.paths as paths
 import modules.face_restoration
 import modules.images as images
 import modules.styles
@ -77,22 +78,28 @@ def apply_overlay(image, paste_loc, index, overlays):
def txt2img_image_conditioning(sd_model, x, width, height): def txt2img_image_conditioning(sd_model, x, width, height):
if sd_model.model.conditioning_key not in {'hybrid', 'concat'}: if sd_model.model.conditioning_key in {'hybrid', 'concat'}: # Inpainting models
# Dummy zero conditioning if we're not using inpainting model.
# The "masked-image" in this case will just be all zeros since the entire image is masked.
image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))
# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
image_conditioning = image_conditioning.to(x.dtype)
return image_conditioning
elif sd_model.model.conditioning_key == "crossattn-adm": # UnCLIP models
return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
else:
# Dummy zero conditioning if we're not using inpainting or unclip models.
# Still takes up a bit of memory, but no encoder call. # Still takes up a bit of memory, but no encoder call.
# Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size. # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size.
return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
# The "masked-image" in this case will just be all zeros since the entire image is masked.
image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))
# Add the fake full 1s mask to the first dimension.
image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
image_conditioning = image_conditioning.to(x.dtype)
return image_conditioning
class StableDiffusionProcessing: class StableDiffusionProcessing:
""" """
@ -184,7 +191,20 @@ class StableDiffusionProcessing:
conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1. conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
return conditioning return conditioning
def inpainting_image_conditioning(self, source_image, latent_image, image_mask = None): def edit_image_conditioning(self, source_image):
conditioning_image = self.sd_model.encode_first_stage(source_image).mode()
return conditioning_image
def unclip_image_conditioning(self, source_image):
c_adm = self.sd_model.embedder(source_image)
if self.sd_model.noise_augmentor is not None:
noise_level = 0 # TODO: Allow other noise levels?
c_adm, noise_level_emb = self.sd_model.noise_augmentor(c_adm, noise_level=repeat(torch.tensor([noise_level]).to(c_adm.device), '1 -> b', b=c_adm.shape[0]))
c_adm = torch.cat((c_adm, noise_level_emb), 1)
return c_adm
def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
self.is_using_inpainting_conditioning = True self.is_using_inpainting_conditioning = True
# Handle the different mask inputs # Handle the different mask inputs
@ -203,7 +223,7 @@ class StableDiffusionProcessing:
# Create another latent image, this time with a masked version of the original input. # Create another latent image, this time with a masked version of the original input.
# Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter. # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
conditioning_mask = conditioning_mask.to(source_image.device).to(source_image.dtype) conditioning_mask = conditioning_mask.to(device=source_image.device, dtype=source_image.dtype)
conditioning_image = torch.lerp( conditioning_image = torch.lerp(
source_image, source_image,
source_image * (1.0 - conditioning_mask), source_image * (1.0 - conditioning_mask),
@ -222,14 +242,22 @@ class StableDiffusionProcessing:
return image_conditioning return image_conditioning
def img2img_image_conditioning(self, source_image, latent_image, image_mask=None): def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
source_image = devices.cond_cast_float(source_image)
# HACK: Using introspection as the Depth2Image model doesn't appear to uniquely # HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
# identify itself with a field common to all models. The conditioning_key is also hybrid. # identify itself with a field common to all models. The conditioning_key is also hybrid.
if isinstance(self.sd_model, LatentDepth2ImageDiffusion): if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
return self.depth2img_image_conditioning(source_image) return self.depth2img_image_conditioning(source_image)
if self.sd_model.cond_stage_key == "edit":
return self.edit_image_conditioning(source_image)
if self.sampler.conditioning_key in {'hybrid', 'concat'}: if self.sampler.conditioning_key in {'hybrid', 'concat'}:
return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask) return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
if self.sampler.conditioning_key == "crossattn-adm":
return self.unclip_image_conditioning(source_image)
# Dummy zero conditioning if we're not using inpainting or depth model. # Dummy zero conditioning if we're not using inpainting or depth model.
return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1) return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)
@ -257,6 +285,7 @@ class Processed:
self.height = p.height self.height = p.height
self.sampler_name = p.sampler_name self.sampler_name = p.sampler_name
self.cfg_scale = p.cfg_scale self.cfg_scale = p.cfg_scale
self.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
self.steps = p.steps self.steps = p.steps
self.batch_size = p.batch_size self.batch_size = p.batch_size
self.restore_faces = p.restore_faces self.restore_faces = p.restore_faces
@ -434,19 +463,17 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Steps": p.steps, "Steps": p.steps,
"Sampler": p.sampler_name, "Sampler": p.sampler_name,
"CFG scale": p.cfg_scale, "CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": all_seeds[index], "Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None), "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}", "Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')), "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]), "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength), "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None), "Denoising strength": getattr(p, 'denoising_strength', None),
"Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
"Clip skip": None if clip_skip <= 1 else clip_skip, "Clip skip": None if clip_skip <= 1 else clip_skip,
"ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
} }
@ -533,8 +560,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings: if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
model_hijack.embedding_db.load_textual_inversion_embeddings() model_hijack.embedding_db.load_textual_inversion_embeddings()
_, extra_network_data = extra_networks.parse_prompts(p.all_prompts[0:1])
if p.scripts is not None: if p.scripts is not None:
p.scripts.process(p) p.scripts.process(p)
@ -568,16 +593,14 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
with devices.autocast(): with devices.autocast():
p.init(p.all_prompts, p.all_seeds, p.all_subseeds) p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
if not p.disable_extra_networks: # for OSX, loading the model during sampling changes the generated picture, so it is loaded here
extra_networks.activate(p, extra_network_data) if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
sd_vae_approx.model()
with open(os.path.join(shared.script_path, "params.txt"), "w", encoding="utf8") as file:
processed = Processed(p, [], p.seed, "")
file.write(processed.infotext(p, 0))
if state.job_count == -1: if state.job_count == -1:
state.job_count = p.n_iter state.job_count = p.n_iter
extra_network_data = None
for n in range(p.n_iter): for n in range(p.n_iter):
p.iteration = n p.iteration = n
@ -592,14 +615,30 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size] seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size] subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]
if p.scripts is not None:
p.scripts.before_process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)
if len(prompts) == 0: if len(prompts) == 0:
break break
prompts, _ = extra_networks.parse_prompts(prompts) prompts, extra_network_data = extra_networks.parse_prompts(prompts)
if not p.disable_extra_networks:
with devices.autocast():
extra_networks.activate(p, extra_network_data)
if p.scripts is not None: if p.scripts is not None:
p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)
# params.txt should be saved after scripts.process_batch, since the
# infotext could be modified by that callback
# Example: a wildcard processed by process_batch sets an extra model
# strength, which is saved as "Model Strength: 1.0" in the infotext
if n == 0:
with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
processed = Processed(p, [], p.seed, "")
file.write(processed.infotext(p, 0))
uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc) uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc)
c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c)
@ -610,7 +649,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if p.n_iter > 1: if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}" shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast(): with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts) samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))] x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))]
@ -645,6 +684,11 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
image = Image.fromarray(x_sample) image = Image.fromarray(x_sample)
if p.scripts is not None:
pp = scripts.PostprocessImageArgs(image)
p.scripts.postprocess_image(p, pp)
image = pp.image
if p.color_corrections is not None and i < len(p.color_corrections): if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction: if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images) image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
@ -662,6 +706,22 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
image.info["parameters"] = text image.info["parameters"] = text
output_images.append(image) output_images.append(image)
if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay:
image_mask = p.mask_for_overlay.convert('RGB')
image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), p.mask_for_overlay.convert('L')).convert('RGBA')
if opts.save_mask:
images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")
if opts.save_mask_composite:
images.save_image(image_mask_composite, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")
if opts.return_mask:
output_images.append(image_mask)
if opts.return_mask_composite:
output_images.append(image_mask_composite)
del x_samples_ddim del x_samples_ddim
devices.torch_gc() devices.torch_gc()
@ -686,7 +746,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if opts.grid_save: if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True) images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)
if not p.disable_extra_networks: if not p.disable_extra_networks and extra_network_data:
extra_networks.deactivate(p, extra_network_data) extra_networks.deactivate(p, extra_network_data)
devices.torch_gc() devices.torch_gc()
@ -865,7 +925,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
shared.state.nextjob() shared.state.nextjob()
-        img2img_sampler_name = self.sampler_name if self.sampler_name != 'PLMS' else 'DDIM'  # PLMS does not support img2img so we just silently switch to DDIM
+        img2img_sampler_name = self.sampler_name
+        if self.sampler_name in ['PLMS', 'UniPC']:  # PLMS/UniPC do not support img2img so we just silently switch to DDIM
+            img2img_sampler_name = 'DDIM'
self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model) self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)
samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2] samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]
@ -884,12 +946,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
sampler = None sampler = None
def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs): def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
super().__init__(**kwargs) super().__init__(**kwargs)
self.init_images = init_images self.init_images = init_images
self.resize_mode: int = resize_mode self.resize_mode: int = resize_mode
self.denoising_strength: float = denoising_strength self.denoising_strength: float = denoising_strength
self.image_cfg_scale: float = image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
self.init_latent = None self.init_latent = None
self.image_mask = mask self.image_mask = mask
self.latent_mask = None self.latent_mask = None

View File

@ -46,7 +46,7 @@ class UpscalerRealESRGAN(Upscaler):
         scale=info.scale,
         model_path=info.local_data_path,
         model=info.model(),
-        half=not cmd_opts.no_half,
+        half=not cmd_opts.no_half and not cmd_opts.upcast_sampling,
         tile=opts.ESRGAN_tile,
         tile_pad=opts.ESRGAN_tile_overlap,
     )

View File

@ -29,7 +29,7 @@ class ImageSaveParams:
class CFGDenoiserParams: class CFGDenoiserParams:
def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps): def __init__(self, x, image_cond, sigma, sampling_step, total_sampling_steps, text_cond, text_uncond):
self.x = x self.x = x
"""Latent image representation in the process of being denoised""" """Latent image representation in the process of being denoised"""
@ -44,6 +44,24 @@ class CFGDenoiserParams:
self.total_sampling_steps = total_sampling_steps self.total_sampling_steps = total_sampling_steps
"""Total number of sampling steps planned""" """Total number of sampling steps planned"""
self.text_cond = text_cond
""" Encoder hidden states of text conditioning from prompt"""
self.text_uncond = text_uncond
""" Encoder hidden states of text conditioning from negative prompt"""
class CFGDenoisedParams:
def __init__(self, x, sampling_step, total_sampling_steps):
self.x = x
"""Latent image representation in the process of being denoised"""
self.sampling_step = sampling_step
"""Current Sampling step number"""
self.total_sampling_steps = total_sampling_steps
"""Total number of sampling steps planned"""
class UiTrainTabParams: class UiTrainTabParams:
@ -68,6 +86,7 @@ callback_map = dict(
callbacks_before_image_saved=[], callbacks_before_image_saved=[],
callbacks_image_saved=[], callbacks_image_saved=[],
callbacks_cfg_denoiser=[], callbacks_cfg_denoiser=[],
callbacks_cfg_denoised=[],
callbacks_before_component=[], callbacks_before_component=[],
callbacks_after_component=[], callbacks_after_component=[],
callbacks_image_grid=[], callbacks_image_grid=[],
@ -150,6 +169,14 @@ def cfg_denoiser_callback(params: CFGDenoiserParams):
report_exception(c, 'cfg_denoiser_callback') report_exception(c, 'cfg_denoiser_callback')
def cfg_denoised_callback(params: CFGDenoisedParams):
for c in callback_map['callbacks_cfg_denoised']:
try:
c.callback(params)
except Exception:
report_exception(c, 'cfg_denoised_callback')
def before_component_callback(component, **kwargs): def before_component_callback(component, **kwargs):
for c in callback_map['callbacks_before_component']: for c in callback_map['callbacks_before_component']:
try: try:
@ -283,6 +310,14 @@ def on_cfg_denoiser(callback):
add_callback(callback_map['callbacks_cfg_denoiser'], callback) add_callback(callback_map['callbacks_cfg_denoiser'], callback)
def on_cfg_denoised(callback):
"""register a function to be called in the kdiffussion cfg_denoiser method after building the inner model inputs.
The callback is called with one argument:
- params: CFGDenoisedParams - parameters to be passed to the inner model and sampling state details.
"""
add_callback(callback_map['callbacks_cfg_denoised'], callback)
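A hedged sketch of an extension-side consumer of the new hook, alongside the existing on_cfg_denoiser one; the function bodies are invented for illustration, only the hook names and parameter fields come from the code above.
```
from modules import script_callbacks

def log_denoiser(params: script_callbacks.CFGDenoiserParams):
    # runs before the inner model call; text_cond/text_uncond carry the prompt
    # and negative-prompt encoder hidden states
    print(f"denoiser step {params.sampling_step + 1}/{params.total_sampling_steps}")

def log_denoised(params: script_callbacks.CFGDenoisedParams):
    # runs after the inner model call; params.x holds the denoised latent batch
    print("denoised:", params.x.shape)

script_callbacks.on_cfg_denoiser(log_denoiser)
script_callbacks.on_cfg_denoised(log_denoised)
```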
def on_before_component(callback): def on_before_component(callback):
"""register a function to be called before a component is created. """register a function to be called before a component is created.
The callback is called with arguments: The callback is called with arguments:

View File

@ -1,16 +1,14 @@
 import os
 import sys
 import traceback
+import importlib.util
 from types import ModuleType

 def load_module(path):
-    with open(path, "r", encoding="utf8") as file:
-        text = file.read()
-
-    compiled = compile(text, path, 'exec')
-    module = ModuleType(os.path.basename(path))
-    exec(compiled, module.__dict__)
+    module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path)
+    module = importlib.util.module_from_spec(module_spec)
+    module_spec.loader.exec_module(module)

     return module

View File

@ -6,12 +6,16 @@ from collections import namedtuple
import gradio as gr import gradio as gr
from modules.processing import StableDiffusionProcessing
from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing
AlwaysVisible = object() AlwaysVisible = object()
class PostprocessImageArgs:
def __init__(self, image):
self.image = image
class Script: class Script:
filename = None filename = None
args_from = None args_from = None
@ -29,6 +33,11 @@ class Script:
parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example parsing infotext to set the value for the component; see ui.py's txt2img_paste_fields for an example
""" """
paste_field_names = None
"""if set in ui(), this is a list of names of infotext fields; the fields will be sent through the
various "Send to <X>" buttons when clicked
"""
def title(self): def title(self):
"""this function should return the title of the script. This is what will be displayed in the dropdown menu.""" """this function should return the title of the script. This is what will be displayed in the dropdown menu."""
@ -65,7 +74,7 @@ class Script:
args contains all values returned by components from ui() args contains all values returned by components from ui()
""" """
raise NotImplementedError() pass
def process(self, p, *args): def process(self, p, *args):
""" """
@ -76,6 +85,20 @@ class Script:
pass pass
def before_process_batch(self, p, *args, **kwargs):
"""
Called before extra networks are parsed from the prompt, so you can add
new extra network keywords to the prompt with this callback.
**kwargs will have those items:
- batch_number - index of current batch, from 0 to number of batches-1
- prompts - list of prompts for current batch; you can change contents of this list but changing the number of entries will likely break things
- seeds - list of seeds for current batch
- subseeds - list of subseeds for current batch
"""
pass
def process_batch(self, p, *args, **kwargs): def process_batch(self, p, *args, **kwargs):
""" """
Same as process(), but called for every batch. Same as process(), but called for every batch.
@ -100,6 +123,13 @@ class Script:
pass pass
def postprocess_image(self, p, pp: PostprocessImageArgs, *args):
"""
Called for every image after it has been generated.
"""
pass
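To make the new hooks concrete, a hedged example of an always-on extension script that uses both before_process_batch and postprocess_image; the class and its behaviour are invented for illustration and are not part of the webui source.
```
import gradio as gr
from PIL import ImageOps

from modules import scripts

class ExampleHooksScript(scripts.Script):
    def title(self):
        return "Example hooks"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        enabled = gr.Checkbox(label="Enable example hooks", value=False)
        return [enabled]

    def before_process_batch(self, p, enabled, **kwargs):
        # prompts/seeds/subseeds for the batch arrive in kwargs and may be edited in place,
        # e.g. to inject extra network keywords before they are parsed
        if enabled:
            kwargs["prompts"][:] = [prompt + ", high detail" for prompt in kwargs["prompts"]]

    def postprocess_image(self, p, pp, enabled):
        # called once per generated image; replacing pp.image changes what gets saved
        if enabled:
            pp.image = ImageOps.mirror(pp.image)
```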
def postprocess(self, p, processed, *args): def postprocess(self, p, processed, *args):
""" """
This function is called after processing ends for AlwaysVisible scripts. This function is called after processing ends for AlwaysVisible scripts.
@ -209,7 +239,15 @@ def load_scripts():
elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing): elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing):
postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module)) postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))
for scriptfile in sorted(scripts_list): def orderby(basedir):
# 1st webui, 2nd extensions-builtin, 3rd extensions
priority = {os.path.join(paths.script_path, "extensions-builtin"):1, paths.script_path:0}
for key in priority:
if basedir.startswith(key):
return priority[key]
return 9999
for scriptfile in sorted(scripts_list, key=lambda x: [orderby(x.basedir), x]):
try: try:
if scriptfile.basedir != paths.script_path: if scriptfile.basedir != paths.script_path:
sys.path = [scriptfile.basedir] + sys.path sys.path = [scriptfile.basedir] + sys.path
@ -245,13 +283,18 @@ class ScriptRunner:
self.alwayson_scripts = [] self.alwayson_scripts = []
self.titles = [] self.titles = []
self.infotext_fields = [] self.infotext_fields = []
self.paste_field_names = []
def initialize_scripts(self, is_img2img): def initialize_scripts(self, is_img2img):
from modules import scripts_auto_postprocessing
self.scripts.clear()
self.alwayson_scripts.clear()
self.selectable_scripts.clear()
-for script_class, path, basedir, script_module in scripts_data:
+auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data()
for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data:
script = script_class()
script.filename = path
script.is_txt2img = not is_img2img
@@ -289,6 +332,9 @@ class ScriptRunner:
if script.infotext_fields is not None:
self.infotext_fields += script.infotext_fields
if script.paste_field_names is not None:
self.paste_field_names += script.paste_field_names
inputs += controls
inputs_alwayson += [script.alwayson for _ in controls]
script.args_to = len(inputs)
@@ -330,9 +376,23 @@ class ScriptRunner:
outputs=[script.group for script in self.selectable_scripts]
)
self.script_load_ctr = 0
def onload_script_visibility(params):
title = params.get('Script', None)
if title:
title_index = self.titles.index(title)
visibility = title_index == self.script_load_ctr
self.script_load_ctr = (self.script_load_ctr + 1) % len(self.titles)
return gr.update(visible=visibility)
else:
return gr.update(visible=False)
self.infotext_fields.append( (dropdown, lambda x: gr.update(value=x.get('Script', 'None'))) )
self.infotext_fields.extend( [(script.group, onload_script_visibility) for script in self.selectable_scripts] )
return inputs
-def run(self, p: StableDiffusionProcessing, *args):
+def run(self, p, *args):
script_index = args[0]
if script_index == 0:
@@ -359,6 +419,15 @@ class ScriptRunner:
print(f"Error running process: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def before_process_batch(self, p, **kwargs):
for script in self.alwayson_scripts:
try:
script_args = p.script_args[script.args_from:script.args_to]
script.before_process_batch(p, *script_args, **kwargs)
except Exception:
print(f"Error running before_process_batch: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def process_batch(self, p, **kwargs):
for script in self.alwayson_scripts:
try:
@@ -386,6 +455,15 @@ class ScriptRunner:
print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def postprocess_image(self, p, pp: PostprocessImageArgs):
for script in self.alwayson_scripts:
try:
script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_image(p, pp, *script_args)
except Exception:
print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def before_component(self, component, **kwargs):
for script in self.scripts:
try:
@@ -443,6 +521,18 @@ def reload_scripts():
scripts_postproc = scripts_postprocessing.ScriptPostprocessingRunner()
def add_classes_to_gradio_component(comp):
"""
this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others
"""
comp.elem_classes = ["gradio-" + comp.get_block_name(), *(comp.elem_classes or [])]
if getattr(comp, 'multiselect', False):
comp.elem_classes.append('multiselect')
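A small illustration (not from the diff) of what the helper above does to a component, assuming a Gradio version that exposes elem_classes, as the patched code itself requires:

import gradio as gr

btn = gr.Button("Generate")
add_classes_to_gradio_component(btn)   # get_block_name() == "button"
print(btn.elem_classes)                # -> ["gradio-button"], targetable from style.css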
def IOComponent_init(self, *args, **kwargs):
if scripts_current is not None:
scripts_current.before_component(self, **kwargs)
@@ -451,6 +541,8 @@ def IOComponent_init(self, *args, **kwargs):
res = original_IOComponent_init(self, *args, **kwargs)
add_classes_to_gradio_component(self)
script_callbacks.after_component_callback(self, **kwargs)
if scripts_current is not None:
@@ -461,3 +553,15 @@ def IOComponent_init(self, *args, **kwargs):
original_IOComponent_init = gr.components.IOComponent.__init__
gr.components.IOComponent.__init__ = IOComponent_init
def BlockContext_init(self, *args, **kwargs):
res = original_BlockContext_init(self, *args, **kwargs)
add_classes_to_gradio_component(self)
return res
original_BlockContext_init = gr.blocks.BlockContext.__init__
gr.blocks.BlockContext.__init__ = BlockContext_init

modules/scripts_auto_postprocessing.py Normal file
@@ -0,0 +1,42 @@
from modules import scripts, scripts_postprocessing, shared
class ScriptPostprocessingForMainUI(scripts.Script):
def __init__(self, script_postproc):
self.script: scripts_postprocessing.ScriptPostprocessing = script_postproc
self.postprocessing_controls = None
def title(self):
return self.script.name
def show(self, is_img2img):
return scripts.AlwaysVisible
def ui(self, is_img2img):
self.postprocessing_controls = self.script.ui()
return self.postprocessing_controls.values()
def postprocess_image(self, p, script_pp, *args):
args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)}
pp = scripts_postprocessing.PostprocessedImage(script_pp.image)
pp.info = {}
self.script.process(pp, **args_dict)
p.extra_generation_params.update(pp.info)
script_pp.image = pp.image
def create_auto_preprocessing_script_data():
from modules import scripts
res = []
for name in shared.opts.postprocessing_enable_in_main_ui:
script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None)
if script is None:
continue
constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class())
res.append(scripts.ScriptClassData(script_class=constructor, path=script.path, basedir=script.basedir, module=script.module))
return res
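An illustrative sketch (not part of the diff) of how the wrapper above is driven by the postprocessing_enable_in_main_ui option it reads; the "Upscale" name is only an example value:

from modules import shared, scripts_auto_postprocessing

shared.opts.postprocessing_enable_in_main_ui = ["Upscale"]   # example value
for data in scripts_auto_postprocessing.create_auto_preprocessing_script_data():
    # each entry wraps a ScriptPostprocessing as an always-on main-UI script
    print(data.path, data.script_class)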

modules/scripts_postprocessing.py
@@ -46,6 +46,8 @@ class ScriptPostprocessing:
pass
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
res = func(*args, **kwargs)
@@ -68,6 +70,9 @@ class ScriptPostprocessingRunner:
script: ScriptPostprocessing = script_class()
script.filename = path
if script.name == "Simple Upscale":
continue
self.scripts.append(script)
def create_script_ui(self, script, inputs):
@@ -87,12 +92,11 @@ class ScriptPostprocessingRunner:
import modules.scripts
self.initialize_scripts(modules.scripts.postprocessing_scripts_data)
-scripts_order = [x.lower().strip() for x in shared.opts.postprocessing_scipts_order.split(",")]
+scripts_order = shared.opts.postprocessing_operation_order
def script_score(name):
name = name.lower()
for i, possible_match in enumerate(scripts_order):
-if possible_match in name:
+if possible_match == name:
return i
return len(self.scripts)
@@ -105,7 +109,7 @@ class ScriptPostprocessingRunner:
inputs = []
for script in self.scripts_in_preferred_order():
-with gr.Box() as group:
+with gr.Row() as group:
self.create_script_ui(script, inputs)
script.group = group
@@ -145,3 +149,4 @@ class ScriptPostprocessingRunner:
def image_changed(self):
for script in self.scripts_in_preferred_order():
script.image_changed()

modules/sd_disable_initialization.py
@@ -20,8 +20,9 @@ class DisableInitialization:
```
"""
-def __init__(self):
+def __init__(self, disable_clip=True):
self.replaced = []
self.disable_clip = disable_clip
def replace(self, obj, field, func):
original = getattr(obj, field, None)
@@ -75,12 +76,14 @@ class DisableInitialization:
self.replace(torch.nn.init, 'kaiming_uniform_', do_nothing)
self.replace(torch.nn.init, '_no_grad_normal_', do_nothing)
self.replace(torch.nn.init, '_no_grad_uniform_', do_nothing)
-self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained)
-self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained)
-self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model)
-self.transformers_tokenization_utils_base_cached_file = self.replace(transformers.tokenization_utils_base, 'cached_file', transformers_tokenization_utils_base_cached_file)
-self.transformers_configuration_utils_cached_file = self.replace(transformers.configuration_utils, 'cached_file', transformers_configuration_utils_cached_file)
-self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache)
+if self.disable_clip:
+    self.create_model_and_transforms = self.replace(open_clip, 'create_model_and_transforms', create_model_and_transforms_without_pretrained)
+    self.CLIPTextModel_from_pretrained = self.replace(ldm.modules.encoders.modules.CLIPTextModel, 'from_pretrained', CLIPTextModel_from_pretrained)
+    self.transformers_modeling_utils_load_pretrained_model = self.replace(transformers.modeling_utils.PreTrainedModel, '_load_pretrained_model', transformers_modeling_utils_load_pretrained_model)
+    self.transformers_tokenization_utils_base_cached_file = self.replace(transformers.tokenization_utils_base, 'cached_file', transformers_tokenization_utils_base_cached_file)
+    self.transformers_configuration_utils_cached_file = self.replace(transformers.configuration_utils, 'cached_file', transformers_configuration_utils_cached_file)
+    self.transformers_utils_hub_get_from_cache = self.replace(transformers.utils.hub, 'get_from_cache', transformers_utils_hub_get_from_cache)
def __exit__(self, exc_type, exc_val, exc_tb):
for obj, field, original in self.replaced:

modules/sd_hijack.py
@@ -1,5 +1,6 @@
import torch
from torch.nn.functional import silu
from types import MethodType
import modules.textual_inversion.textual_inversion
from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint
@@ -36,11 +37,23 @@ def apply_optimizations():
optimization_method = None
can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp
if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
print("Applying xformers cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
optimization_method = 'xformers'
elif cmd_opts.opt_sdp_no_mem_attention and can_use_sdp:
print("Applying scaled dot product cross attention optimization (without memory efficient attention).")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_no_mem_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sdp_no_mem_attnblock_forward
optimization_method = 'sdp-no-mem'
elif cmd_opts.opt_sdp_attention and can_use_sdp:
print("Applying scaled dot product cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.scaled_dot_product_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sdp_attnblock_forward
optimization_method = 'sdp'
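For reference (not part of the diff), the new 'sdp' paths rely on torch.nn.functional.scaled_dot_product_attention from PyTorch 2.x, which computes the same softmax(QK^T / sqrt(d)) V as the manual einsum implementations it replaces:

import torch

q = torch.randn(2, 8, 77, 64)     # (batch, heads, tokens, head_dim)
k = torch.randn(2, 8, 77, 64)
v = torch.randn(2, 8, 77, 64)

sdp = torch.nn.functional.scaled_dot_product_attention(q, k, v)
manual = torch.softmax(q @ k.transpose(-2, -1) / 64 ** 0.5, dim=-1) @ v
assert torch.allclose(sdp, manual, atol=1e-4)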
elif cmd_opts.opt_sub_quad_attention:
print("Applying sub-quadratic cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.sub_quad_attention_forward
@@ -76,6 +89,54 @@ def fix_checkpoint():
pass
def weighted_loss(sd_model, pred, target, mean=True):
#Calculate the weight normally, but ignore the mean
loss = sd_model._old_get_loss(pred, target, mean=False)
#Check if we have weights available
weight = getattr(sd_model, '_custom_loss_weight', None)
if weight is not None:
loss *= weight
#Return the loss, as mean if specified
return loss.mean() if mean else loss
def weighted_forward(sd_model, x, c, w, *args, **kwargs):
try:
#Temporarily append weights to a place accessible during loss calc
sd_model._custom_loss_weight = w
#Replace 'get_loss' with a weight-aware one. Otherwise we need to reimplement 'forward' completely
#Keep 'get_loss', but don't overwrite the previous old_get_loss if it's already set
if not hasattr(sd_model, '_old_get_loss'):
sd_model._old_get_loss = sd_model.get_loss
sd_model.get_loss = MethodType(weighted_loss, sd_model)
#Run the standard forward function, but with the patched 'get_loss'
return sd_model.forward(x, c, *args, **kwargs)
finally:
try:
#Delete temporary weights if appended
del sd_model._custom_loss_weight
except AttributeError as e:
pass
#If we have an old loss function, reset the loss function to the original one
if hasattr(sd_model, '_old_get_loss'):
sd_model.get_loss = sd_model._old_get_loss
del sd_model._old_get_loss
def apply_weighted_forward(sd_model):
#Add new function 'weighted_forward' that can be called to calc weighted loss
sd_model.weighted_forward = MethodType(weighted_forward, sd_model)
def undo_weighted_forward(sd_model):
try:
del sd_model.weighted_forward
except AttributeError as e:
pass
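A hedged sketch (not part of the diff) of how training code might call the weighted forward pass added above, assuming sd_model is a loaded LatentDiffusion that the hijack has already decorated with weighted_forward():

def training_step(sd_model, latents, conditioning, weight_map):
    # weight_map multiplies the un-reduced loss (e.g. to emphasise masked pixels)
    # before the mean is taken; LatentDiffusion.forward returns (loss, loss_dict)
    loss, _ = sd_model.weighted_forward(latents, conditioning, weight_map)
    return loss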
class StableDiffusionModelHijack:
fixes = None
comments = []
@@ -104,6 +165,10 @@ class StableDiffusionModelHijack:
m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self)
m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
apply_weighted_forward(m)
if m.cond_stage_key == "edit":
sd_hijack_unet.hijack_ddpm_edit()
self.optimization_method = apply_optimizations()
self.clip = m.cond_stage_model
@@ -131,6 +196,9 @@ class StableDiffusionModelHijack:
m.cond_stage_model.wrapped.model.token_embedding = m.cond_stage_model.wrapped.model.token_embedding.wrapped
m.cond_stage_model = m.cond_stage_model.wrapped
undo_optimizations()
undo_weighted_forward(m)
self.apply_circular(False)
self.layers = None
self.clip = None
@@ -171,7 +239,7 @@ class EmbeddingsWithFixes(torch.nn.Module):
vecs = []
for fixes, tensor in zip(batch_fixes, inputs_embeds):
for offset, embedding in fixes:
-emb = embedding.vec
+emb = devices.cond_cast_unet(embedding.vec)
emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])

modules/sd_hijack_inpainting.py
@@ -11,6 +11,7 @@ import ldm.models.diffusion.plms
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ddim import DDIMSampler, noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding
@torch.no_grad()
@@ -96,15 +97,6 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F
return x_prev, pred_x0, e_t
def should_hijack_inpainting(checkpoint_info):
from modules import sd_models
ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
cfg_basename = os.path.basename(sd_models.find_checkpoint_config(checkpoint_info)).lower()
return "inpainting" in ckpt_basename and not "inpainting" in cfg_basename
def do_inpainting_hijack():
# p_sample_plms is needed because PLMS can't work with dicts as conditionings

modules/sd_hijack_ip2p.py Normal file
@@ -0,0 +1,13 @@
import collections
import os.path
import sys
import gc
import time
def should_hijack_ip2p(checkpoint_info):
from modules import sd_models_config
ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower()
return "pix2pix" in ckpt_basename and not "pix2pix" in cfg_basename

modules/sd_hijack_optimizations.py
@@ -9,7 +9,7 @@ from torch import einsum
from ldm.util import default
from einops import rearrange
-from modules import shared, errors
+from modules import shared, errors, devices
from modules.hypernetworks import hypernetwork
from .sub_quadratic_attention import efficient_dot_product_attention
@@ -52,18 +52,25 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
del q_in, k_in, v_in
-r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)
-for i in range(0, q.shape[0], 2):
-    end = i + 2
-    s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end])
-    s1 *= self.scale
-    s2 = s1.softmax(dim=-1)
-    del s1
-    r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end])
-    del s2
-del q, k, v
+dtype = q.dtype
+if shared.opts.upcast_attn:
+    q, k, v = q.float(), k.float(), v.float()
+with devices.without_autocast(disable=not shared.opts.upcast_attn):
+    r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
+    for i in range(0, q.shape[0], 2):
+        end = i + 2
+        s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end])
+        s1 *= self.scale
+        s2 = s1.softmax(dim=-1)
+        del s1
+        r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end])
+        del s2
+    del q, k, v
+r1 = r1.to(dtype)
r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
del r1
@@ -82,45 +89,52 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
k_in = self.to_k(context_k)
v_in = self.to_v(context_v)
-k_in *= self.scale
-del context, x
+dtype = q_in.dtype
+if shared.opts.upcast_attn:
+    q_in, k_in, v_in = q_in.float(), k_in.float(), v_in if v_in.device.type == 'mps' else v_in.float()
+with devices.without_autocast(disable=not shared.opts.upcast_attn):
k_in = k_in * self.scale
del context, x
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
del q_in, k_in, v_in
r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
mem_free_total = get_available_vram()
gb = 1024 ** 3
tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
modifier = 3 if q.element_size() == 2 else 2.5
mem_required = tensor_size * modifier
steps = 1
if mem_required > mem_free_total:
steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
# print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
# f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
if steps > 64:
max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
for i in range(0, q.shape[1], slice_size):
end = i + slice_size
s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k)
s2 = s1.softmax(dim=-1, dtype=q.dtype)
del s1
r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
del s2
del q, k, v
-q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+r1 = r1.to(dtype)
del q_in, k_in, v_in
r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
mem_free_total = get_available_vram()
gb = 1024 ** 3
tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
modifier = 3 if q.element_size() == 2 else 2.5
mem_required = tensor_size * modifier
steps = 1
if mem_required > mem_free_total:
steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
# print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
# f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
if steps > 64:
max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
for i in range(0, q.shape[1], slice_size):
end = i + slice_size
s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k)
s2 = s1.softmax(dim=-1, dtype=q.dtype)
del s1
r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
del s2
del q, k, v
r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
del r1
@@ -204,12 +218,20 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None):
context = default(context, x)
context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context)
-k = self.to_k(context_k) * self.scale
+k = self.to_k(context_k)
v = self.to_v(context_v)
del context, context_k, context_v, x
-q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
-r = einsum_op(q, k, v)
+dtype = q.dtype
+if shared.opts.upcast_attn:
q, k, v = q.float(), k.float(), v if v.device.type == 'mps' else v.float()
with devices.without_autocast(disable=not shared.opts.upcast_attn):
k = k * self.scale
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
r = einsum_op(q, k, v)
r = r.to(dtype)
return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h))
# -- End of code from https://github.com/invoke-ai/InvokeAI --
@@ -234,8 +256,14 @@ def sub_quad_attention_forward(self, x, context=None, mask=None):
k = k.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
v = v.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1)
dtype = q.dtype
if shared.opts.upcast_attn:
q, k = q.float(), k.float()
x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training)
x = x.to(dtype)
x = x.unflatten(0, (-1, h)).transpose(1,2).flatten(start_dim=2)
out_proj, dropout = self.to_out
@@ -268,15 +296,16 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_
query_chunk_size = q_tokens
kv_chunk_size = k_tokens
-return efficient_dot_product_attention(
-    q,
-    k,
-    v,
-    query_chunk_size=q_chunk_size,
-    kv_chunk_size=kv_chunk_size,
-    kv_chunk_size_min = kv_chunk_size_min,
-    use_checkpoint=use_checkpoint,
-)
+with devices.without_autocast(disable=q.dtype == v.dtype):
+    return efficient_dot_product_attention(
+        q,
+        k,
+        v,
+        query_chunk_size=q_chunk_size,
+        kv_chunk_size=kv_chunk_size,
+        kv_chunk_size_min = kv_chunk_size_min,
+        use_checkpoint=use_checkpoint,
+    )
def get_xformers_flash_attention_op(q, k, v):
@@ -306,11 +335,63 @@ def xformers_attention_forward(self, x, context=None, mask=None):
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
del q_in, k_in, v_in
dtype = q.dtype
if shared.opts.upcast_attn:
q, k, v = q.float(), k.float(), v.float()
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v))
out = out.to(dtype)
out = rearrange(out, 'b n h d -> b n (h d)', h=h)
return self.to_out(out)
# Based on Diffusers usage of scaled dot product attention from https://github.com/huggingface/diffusers/blob/c7da8fd23359a22d0df2741688b5b4f33c26df21/src/diffusers/models/cross_attention.py
# The scaled_dot_product_attention_forward function contains parts of code under Apache-2.0 license listed under Scaled Dot Product Attention in the Licenses section of the web UI interface
def scaled_dot_product_attention_forward(self, x, context=None, mask=None):
batch_size, sequence_length, inner_dim = x.shape
if mask is not None:
mask = self.prepare_attention_mask(mask, sequence_length, batch_size)
mask = mask.view(batch_size, self.heads, -1, mask.shape[-1])
h = self.heads
q_in = self.to_q(x)
context = default(context, x)
context_k, context_v = hypernetwork.apply_hypernetworks(shared.loaded_hypernetworks, context)
k_in = self.to_k(context_k)
v_in = self.to_v(context_v)
head_dim = inner_dim // h
q = q_in.view(batch_size, -1, h, head_dim).transpose(1, 2)
k = k_in.view(batch_size, -1, h, head_dim).transpose(1, 2)
v = v_in.view(batch_size, -1, h, head_dim).transpose(1, 2)
del q_in, k_in, v_in
dtype = q.dtype
if shared.opts.upcast_attn:
q, k, v = q.float(), k.float(), v.float()
# the output of sdp = (batch, num_heads, seq_len, head_dim)
hidden_states = torch.nn.functional.scaled_dot_product_attention(
q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False
)
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, h * head_dim)
hidden_states = hidden_states.to(dtype)
# linear proj
hidden_states = self.to_out[0](hidden_states)
# dropout
hidden_states = self.to_out[1](hidden_states)
return hidden_states
def scaled_dot_product_no_mem_attention_forward(self, x, context=None, mask=None):
with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False):
return scaled_dot_product_attention_forward(self, x, context, mask)
def cross_attention_attnblock_forward(self, x):
h_ = x
h_ = self.norm(h_)
@@ -378,16 +459,44 @@ def xformers_attnblock_forward(self, x):
v = self.v(h_)
b, c, h, w = q.shape
q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
dtype = q.dtype
if shared.opts.upcast_attn:
q, k = q.float(), k.float()
q = q.contiguous()
k = k.contiguous()
v = v.contiguous()
out = xformers.ops.memory_efficient_attention(q, k, v, op=get_xformers_flash_attention_op(q, k, v))
out = out.to(dtype)
out = rearrange(out, 'b (h w) c -> b c h w', h=h)
out = self.proj_out(out)
return x + out
except NotImplementedError:
return cross_attention_attnblock_forward(self, x)
def sdp_attnblock_forward(self, x):
h_ = x
h_ = self.norm(h_)
q = self.q(h_)
k = self.k(h_)
v = self.v(h_)
b, c, h, w = q.shape
q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
dtype = q.dtype
if shared.opts.upcast_attn:
q, k = q.float(), k.float()
q = q.contiguous()
k = k.contiguous()
v = v.contiguous()
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)
out = out.to(dtype)
out = rearrange(out, 'b (h w) c -> b c h w', h=h)
out = self.proj_out(out)
return x + out
def sdp_no_mem_attnblock_forward(self, x):
with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False):
return sdp_attnblock_forward(self, x)
def sub_quad_attnblock_forward(self, x):
h_ = x
h_ = self.norm(h_)

modules/sd_hijack_unet.py
@@ -1,4 +1,8 @@
import torch
from packaging import version
from modules import devices
from modules.sd_hijack_utils import CondFunc
class TorchHijackForUnet:
@@ -28,3 +32,48 @@ class TorchHijackForUnet:
th = TorchHijackForUnet()
# Below are monkey patches to enable upcasting a float16 UNet for float32 sampling
def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
if isinstance(cond, dict):
for y in cond.keys():
cond[y] = [x.to(devices.dtype_unet) if isinstance(x, torch.Tensor) else x for x in cond[y]]
with devices.autocast():
return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
class GELUHijack(torch.nn.GELU, torch.nn.Module):
def __init__(self, *args, **kwargs):
torch.nn.GELU.__init__(self, *args, **kwargs)
def forward(self, x):
if devices.unet_needs_upcast:
return torch.nn.GELU.forward(self.float(), x.float()).to(devices.dtype_unet)
else:
return torch.nn.GELU.forward(self, x)
ddpm_edit_hijack = None
def hijack_ddpm_edit():
global ddpm_edit_hijack
if not ddpm_edit_hijack:
CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available():
CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast)
CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast)
CondFunc('open_clip.transformer.ResidualAttentionBlock.__init__', lambda orig_func, *args, **kwargs: kwargs.update({'act_layer': GELUHijack}) and False or orig_func(*args, **kwargs), lambda _, *args, **kwargs: kwargs.get('act_layer') is None or kwargs['act_layer'] == torch.nn.GELU)
first_stage_cond = lambda _, self, *args, **kwargs: devices.unet_needs_upcast and self.model.diffusion_model.dtype == torch.float16
first_stage_sub = lambda orig_func, self, x, **kwargs: orig_func(self, x.to(devices.dtype_vae), **kwargs)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)

modules/sd_hijack_utils.py Normal file
@@ -0,0 +1,28 @@
import importlib
class CondFunc:
def __new__(cls, orig_func, sub_func, cond_func):
self = super(CondFunc, cls).__new__(cls)
if isinstance(orig_func, str):
func_path = orig_func.split('.')
for i in range(len(func_path)-1, -1, -1):
try:
resolved_obj = importlib.import_module('.'.join(func_path[:i]))
break
except ImportError:
pass
for attr_name in func_path[i:-1]:
resolved_obj = getattr(resolved_obj, attr_name)
orig_func = getattr(resolved_obj, func_path[-1])
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
self.__init__(orig_func, sub_func, cond_func)
return lambda *args, **kwargs: self(*args, **kwargs)
def __init__(self, orig_func, sub_func, cond_func):
self.__orig_func = orig_func
self.__sub_func = sub_func
self.__cond_func = cond_func
def __call__(self, *args, **kwargs):
if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs):
return self.__sub_func(self.__orig_func, *args, **kwargs)
else:
return self.__orig_func(*args, **kwargs)
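An illustrative use of CondFunc (not from the diff); the dotted target path here is hypothetical and only shows the calling convention: the substitute runs when the condition returns True, otherwise the original is called:

CondFunc(
    'some_module.some_func',                                    # hypothetical target path
    lambda orig, *args, **kwargs: orig(*args, **kwargs) * 2,    # substitute receives the original first
    lambda orig, *args, **kwargs: kwargs.get('doubled', False)  # condition decides per call
)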

modules/sd_models.py
@@ -2,8 +2,6 @@ import collections
import os.path
import sys
import gc
import time
from collections import namedtuple
import torch
import re
import safetensors.torch
@@ -14,12 +12,13 @@ import ldm.modules.midas as midas
from ldm.util import instantiate_from_config
-from modules import shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config
from modules.paths import models_path
-from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting
+from modules.sd_hijack_inpainting import do_inpainting_hijack
from modules.timer import Timer
model_dir = "Stable-diffusion"
-model_path = os.path.abspath(os.path.join(models_path, model_dir))
+model_path = os.path.abspath(os.path.join(paths.models_path, model_dir))
checkpoints_list = {}
checkpoint_alisases = {}
@@ -42,6 +41,7 @@ class CheckpointInfo:
name = name[1:]
self.name = name
self.name_for_extra = os.path.splitext(os.path.basename(filename))[0]
self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
self.hash = model_hash(filename)
@@ -59,13 +59,17 @@ class CheckpointInfo:
def calculate_shorthash(self):
self.sha256 = hashes.sha256(self.filename, "checkpoint/" + self.name)
if self.sha256 is None:
return
self.shorthash = self.sha256[0:10]
if self.shorthash not in self.ids:
-self.ids += [self.shorthash, self.sha256]
+self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]']
self.register()
checkpoints_list.pop(self.title)
self.title = f'{self.name} [{self.shorthash}]'
self.register()
return self.shorthash
@@ -98,23 +102,18 @@ def checkpoint_tiles():
return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key)
def find_checkpoint_config(info):
if info is None:
return shared.cmd_opts.config
config = os.path.splitext(info.filename)[0] + ".yaml"
if os.path.exists(config):
return config
return shared.cmd_opts.config
def list_models():
checkpoints_list.clear()
checkpoint_alisases.clear()
model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], ext_blacklist=[".vae.safetensors"])
cmd_ckpt = shared.cmd_opts.ckpt
if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
model_url = None
else:
model_url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])
if os.path.exists(cmd_ckpt):
checkpoint_info = CheckpointInfo(cmd_ckpt)
checkpoint_info.register()
@@ -123,7 +122,7 @@ def list_models():
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
-for filename in model_list:
+for filename in sorted(model_list, key=str.lower):
checkpoint_info = CheckpointInfo(filename)
checkpoint_info.register()
@@ -169,7 +168,7 @@ def select_checkpoint():
print(f" - directory {model_path}", file=sys.stderr)
if shared.cmd_opts.ckpt_dir is not None:
print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
-print("Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
+print("Can't run without a checkpoint. Find and place a .ckpt or .safetensors file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
checkpoint_info = next(iter(checkpoints_list.values()))
@@ -179,7 +178,7 @@ def select_checkpoint():
return checkpoint_info
-chckpoint_dict_replacements = {
+checkpoint_dict_replacements = {
'cond_stage_model.transformer.embeddings.': 'cond_stage_model.transformer.text_model.embeddings.',
'cond_stage_model.transformer.encoder.': 'cond_stage_model.transformer.text_model.encoder.',
'cond_stage_model.transformer.final_layer_norm.': 'cond_stage_model.transformer.text_model.final_layer_norm.',
@@ -187,7 +186,7 @@ chckpoint_dict_replacements = {
def transform_checkpoint_dict_key(k):
-for text, replacement in chckpoint_dict_replacements.items():
+for text, replacement in checkpoint_dict_replacements.items():
if k.startswith(text):
k = replacement + k[len(text):]
@@ -211,12 +210,34 @@ def get_state_dict_from_checkpoint(pl_sd):
return pl_sd
def read_metadata_from_safetensors(filename):
import json
with open(filename, mode="rb") as file:
metadata_len = file.read(8)
metadata_len = int.from_bytes(metadata_len, "little")
json_start = file.read(2)
assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file"
json_data = json_start + file.read(metadata_len-2)
json_obj = json.loads(json_data)
res = {}
for k, v in json_obj.get("__metadata__", {}).items():
res[k] = v
if isinstance(v, str) and v[0:1] == '{':
try:
res[k] = json.loads(v)
except Exception as e:
pass
return res
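For context (not part of the diff): a .safetensors file begins with an 8-byte little-endian header length followed by a JSON header, and user metadata sits under its __metadata__ key, which is what the helper above returns. A hypothetical usage:

meta = read_metadata_from_safetensors("models/Lora/example.safetensors")  # hypothetical path
print(meta.get("ss_sd_model_name"))  # e.g. training metadata written by LoRA trainers, if present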
def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
_, extension = os.path.splitext(checkpoint_file)
if extension.lower() == ".safetensors":
-device = map_location or shared.weight_load_location
+device = map_location or shared.weight_load_location or devices.get_optimal_device_name()
if device is None:
device = devices.get_cuda_device_string() if torch.cuda.is_available() else "cpu"
pl_sd = safetensors.torch.load_file(checkpoint_file, device=device)
else:
pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location)
@@ -228,52 +249,72 @@ def read_state_dict(checkpoint_file, print_global_state=False, map_location=None
return sd
-def load_model_weights(model, checkpoint_info: CheckpointInfo):
+def get_checkpoint_state_dict(checkpoint_info: CheckpointInfo, timer):
title = checkpoint_info.title
sd_model_hash = checkpoint_info.calculate_shorthash()
-if checkpoint_info.title != title:
+timer.record("calculate hash")
shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title
-cache_enabled = shared.opts.sd_checkpoint_cache > 0
+if checkpoint_info in checkpoints_loaded:
if cache_enabled and checkpoint_info in checkpoints_loaded:
# use checkpoint cache
print(f"Loading weights [{sd_model_hash}] from cache")
-model.load_state_dict(checkpoints_loaded[checkpoint_info])
+return checkpoints_loaded[checkpoint_info]
else:
# load from file
print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
-sd = read_state_dict(checkpoint_info.filename)
-model.load_state_dict(sd, strict=False)
-del sd
+print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
+res = read_state_dict(checkpoint_info.filename)
+timer.record("load weights from disk")
if cache_enabled:
# cache newly loaded model
checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
-if shared.cmd_opts.opt_channelslast:
+return res
model.to(memory_format=torch.channels_last)
if not shared.cmd_opts.no_half:
vae = model.first_stage_model
-# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
-if shared.cmd_opts.no_half_vae:
-model.first_stage_model = None
-model.half()
+def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
+sd_model_hash = checkpoint_info.calculate_shorthash()
+timer.record("calculate hash")
+shared.opts.data["sd_model_checkpoint"] = checkpoint_info.title
model.first_stage_model = vae
-devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
-devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
-model.first_stage_model.to(devices.dtype_vae)
+if state_dict is None:
+state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
+model.load_state_dict(state_dict, strict=False)
del state_dict
timer.record("apply weights to model")
if shared.opts.sd_checkpoint_cache > 0:
# cache newly loaded model
checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
if shared.cmd_opts.opt_channelslast:
model.to(memory_format=torch.channels_last)
timer.record("apply channels_last")
if not shared.cmd_opts.no_half:
vae = model.first_stage_model
depth_model = getattr(model, 'depth_model', None)
# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
if shared.cmd_opts.no_half_vae:
model.first_stage_model = None
# with --upcast-sampling, don't convert the depth model weights to float16
if shared.cmd_opts.upcast_sampling and depth_model:
model.depth_model = None
model.half()
model.first_stage_model = vae
if depth_model:
model.depth_model = depth_model
timer.record("apply half()")
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
devices.dtype_unet = model.model.diffusion_model.dtype
devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
model.first_stage_model.to(devices.dtype_vae)
timer.record("apply dtype to VAE")
# clean up cache if limit is reached
-if cache_enabled:
-while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
-checkpoints_loaded.popitem(last=False) # LRU
+while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
+checkpoints_loaded.popitem(last=False)
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_info.filename
@@ -286,6 +327,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo):
sd_vae.clear_loaded_vae()
vae_file, vae_source = sd_vae.resolve_vae(checkpoint_info.filename)
sd_vae.load_vae(model, vae_file, vae_source)
timer.record("load VAE")
def enable_midas_autodownload():
@@ -298,7 +340,7 @@ def enable_midas_autodownload():
location automatically.
"""
-midas_path = os.path.join(models_path, 'midas')
+midas_path = os.path.join(paths.models_path, 'midas')
# stable-diffusion-stability-ai hard-codes the midas model path to
# a location that differs from where other scripts using this model look.
@@ -331,24 +373,31 @@ def enable_midas_autodownload():
midas.api.load_model = load_model_wrapper
-class Timer:
-    def __init__(self):
-        self.start = time.time()
-    def elapsed(self):
-        end = time.time()
-        res = end - self.start
-        self.start = end
-        return res
+def repair_config(sd_config):
+    if not hasattr(sd_config.model.params, "use_ema"):
+        sd_config.model.params.use_ema = False
+    if shared.cmd_opts.no_half:
+        sd_config.model.params.unet_config.params.use_fp16 = False
elif shared.cmd_opts.upcast_sampling:
sd_config.model.params.unet_config.params.use_fp16 = True
if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available:
sd_config.model.params.first_stage_config.params.ddconfig.attn_type = "vanilla"
# For UnCLIP-L, override the hardcoded karlo directory
if hasattr(sd_config.model.params, "noise_aug_config") and hasattr(sd_config.model.params.noise_aug_config.params, "clip_stats_path"):
karlo_path = os.path.join(paths.models_path, 'karlo')
sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path)
-def load_model(checkpoint_info=None):
+sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight'
sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight'
def load_model(checkpoint_info=None, already_loaded_state_dict=None, time_taken_to_load_state_dict=None):
from modules import lowvram, sd_hijack
checkpoint_info = checkpoint_info or select_checkpoint()
checkpoint_config = find_checkpoint_config(checkpoint_info)
if checkpoint_config != shared.cmd_opts.config:
print(f"Loading config from: {checkpoint_config}")
if shared.sd_model:
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
@@ -356,29 +405,30 @@ def load_model(checkpoint_info=None):
gc.collect()
devices.torch_gc()
sd_config = OmegaConf.load(checkpoint_config)
if should_hijack_inpainting(checkpoint_info):
# Hardcoded config for now...
sd_config.model.target = "ldm.models.diffusion.ddpm.LatentInpaintDiffusion"
sd_config.model.params.conditioning_key = "hybrid"
sd_config.model.params.unet_config.params.in_channels = 9
sd_config.model.params.finetune_keys = None
if not hasattr(sd_config.model.params, "use_ema"):
sd_config.model.params.use_ema = False
do_inpainting_hijack()
if shared.cmd_opts.no_half:
sd_config.model.params.unet_config.params.use_fp16 = False
timer = Timer()
-sd_model = None
+if already_loaded_state_dict is not None:
state_dict = already_loaded_state_dict
else:
state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
clip_is_included_into_sd = sd1_clip_weight in state_dict or sd2_clip_weight in state_dict
timer.record("find config")
sd_config = OmegaConf.load(checkpoint_config)
repair_config(sd_config)
timer.record("load config")
print(f"Creating model from config: {checkpoint_config}")
sd_model = None
try:
-with sd_disable_initialization.DisableInitialization():
+with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
sd_model = instantiate_from_config(sd_config.model)
except Exception as e:
pass
@@ -387,29 +437,35 @@ def load_model(checkpoint_info=None):
print('Failed to create model quickly; will retry using slow method.', file=sys.stderr)
sd_model = instantiate_from_config(sd_config.model)
-elapsed_create = timer.elapsed()
-load_model_weights(sd_model, checkpoint_info)
-elapsed_load_weights = timer.elapsed()
+sd_model.used_config = checkpoint_config
+timer.record("create model")
+load_model_weights(sd_model, checkpoint_info, state_dict, timer)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
else:
sd_model.to(shared.device)
timer.record("move model to device")
sd_hijack.model_hijack.hijack(sd_model)
timer.record("hijack")
sd_model.eval()
shared.sd_model = sd_model
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model
timer.record("load textual inversion embeddings")
script_callbacks.model_loaded_callback(sd_model)
-elapsed_the_rest = timer.elapsed()
-print(f"Model loaded in {elapsed_create + elapsed_load_weights + elapsed_the_rest:.1f}s ({elapsed_create:.1f}s create model, {elapsed_load_weights:.1f}s load weights).")
+timer.record("scripts callbacks")
+print(f"Model loaded in {timer.summary()}.")
return sd_model
@@ -420,6 +476,7 @@ def reload_model_weights(sd_model=None, info=None):
if not sd_model:
sd_model = shared.sd_model
if sd_model is None: # previous model load failed
current_checkpoint_info = None
else:
@@ -427,38 +484,64 @@ def reload_model_weights(sd_model=None, info=None):
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
-checkpoint_config = find_checkpoint_config(current_checkpoint_info)
+if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
-if current_checkpoint_info is None or checkpoint_config != find_checkpoint_config(checkpoint_info) or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info):
+sd_hijack.model_hijack.undo_hijack(sd_model)
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info)
return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(sd_model)
timer = Timer()
state_dict = get_checkpoint_state_dict(checkpoint_info, timer)
checkpoint_config = sd_models_config.find_checkpoint_config(state_dict, checkpoint_info)
timer.record("find config")
if sd_model is None or checkpoint_config != sd_model.used_config:
del sd_model
checkpoints_loaded.clear()
load_model(checkpoint_info, already_loaded_state_dict=state_dict)
return shared.sd_model
try:
-load_model_weights(sd_model, checkpoint_info)
+load_model_weights(sd_model, checkpoint_info, state_dict, timer)
except Exception as e:
print("Failed to load checkpoint, restoring previous")
-load_model_weights(sd_model, current_checkpoint_info)
+load_model_weights(sd_model, current_checkpoint_info, None, timer)
raise
finally:
sd_hijack.model_hijack.hijack(sd_model)
timer.record("hijack")
script_callbacks.model_loaded_callback(sd_model) script_callbacks.model_loaded_callback(sd_model)
timer.record("script callbacks")
if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram: if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram:
sd_model.to(devices.device) sd_model.to(devices.device)
timer.record("move model to device")
elapsed = timer.elapsed() print(f"Weights loaded in {timer.summary()}.")
print(f"Weights loaded in {elapsed:.1f}s.")
return sd_model return sd_model
def unload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
timer = Timer()
if shared.sd_model:
# shared.sd_model.cond_stage_model.to(devices.cpu)
# shared.sd_model.first_stage_model.to(devices.cpu)
shared.sd_model.to(devices.cpu)
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
shared.sd_model = None
sd_model = None
gc.collect()
devices.torch_gc()
torch.cuda.empty_cache()
print(f"Unloaded weights {timer.summary()}.")
return sd_model
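The load/reload/unload paths above report timings through a Timer helper with record() and summary(); its definition is not part of these hunks, so the following is only a minimal sketch of the interface implied by the record()/summary()/elapsed() calls, not the project's actual implementation.

import time

class Timer:
    """Hypothetical reconstruction based only on the calls seen in the diff above."""

    def __init__(self):
        self.start = time.time()
        self.records = {}

    def elapsed(self):
        # seconds since the last checkpoint; resets the reference point
        now = time.time()
        res = now - self.start
        self.start = now
        return res

    def record(self, category):
        # accumulate elapsed time under a named phase, e.g. "hijack"
        self.records[category] = self.records.get(category, 0.0) + self.elapsed()

    def summary(self):
        # e.g. "3.2s (2.1s load weights, 0.6s hijack, 0.5s move model to device)"
        total = sum(self.records.values())
        detail = ", ".join(f"{t:.1f}s {name}" for name, t in self.records.items())
        return f"{total:.1f}s ({detail})"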

modules/sd_models_config.py (new file)

@@ -0,0 +1,119 @@
import re
import os
import torch
from modules import shared, paths, sd_disable_initialization
sd_configs_path = shared.sd_configs_path
sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
config_default = shared.sd_default_config
config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
def is_using_v_parameterization_for_sd2(state_dict):
"""
Detects whether unet in state_dict is using v-parameterization. Returns True if it is. You're welcome.
"""
import ldm.modules.diffusionmodules.openaimodel
from modules import devices
device = devices.cpu
with sd_disable_initialization.DisableInitialization():
unet = ldm.modules.diffusionmodules.openaimodel.UNetModel(
use_checkpoint=True,
use_fp16=False,
image_size=32,
in_channels=4,
out_channels=4,
model_channels=320,
attention_resolutions=[4, 2, 1],
num_res_blocks=2,
channel_mult=[1, 2, 4, 4],
num_head_channels=64,
use_spatial_transformer=True,
use_linear_in_transformer=True,
transformer_depth=1,
context_dim=1024,
legacy=False
)
unet.eval()
with torch.no_grad():
unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k}
unet.load_state_dict(unet_sd, strict=True)
unet.to(device=device, dtype=torch.float)
test_cond = torch.ones((1, 2, 1024), device=device) * 0.5
x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5
out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().item()
return out < -1
def guess_model_config_from_state_dict(sd, filename):
sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)
if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
return config_depth_model
elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768:
return config_unclip
elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024:
return config_unopenclip
if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
if diffusion_model_input.shape[1] == 9:
return config_sd2_inpainting
elif is_using_v_parameterization_for_sd2(sd):
return config_sd2v
else:
return config_sd2
if diffusion_model_input is not None:
if diffusion_model_input.shape[1] == 9:
return config_inpainting
if diffusion_model_input.shape[1] == 8:
return config_instruct_pix2pix
if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
return config_alt_diffusion
return config_default
def find_checkpoint_config(state_dict, info):
if info is None:
return guess_model_config_from_state_dict(state_dict, "")
config = find_checkpoint_config_near_filename(info)
if config is not None:
return config
return guess_model_config_from_state_dict(state_dict, info.filename)
def find_checkpoint_config_near_filename(info):
if info is None:
return None
config = os.path.splitext(info.filename)[0] + ".yaml"
if os.path.exists(config):
return config
return None
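A quick usage sketch for the helpers above (the checkpoint path is made up for illustration): when no CheckpointInfo object is available, find_checkpoint_config skips the per-file .yaml lookup and guesses the config purely from the state dict.

import safetensors.torch
from modules import sd_models_config

# hypothetical checkpoint path, for illustration only
state_dict = safetensors.torch.load_file("models/Stable-diffusion/example.safetensors")

config_path = sd_models_config.find_checkpoint_config(state_dict, None)
# with info=None this goes straight to guess_model_config_from_state_dict, which
# picks between the SD1/SD2, inpainting, depth, unCLIP, instruct-pix2pix and
# alt-diffusion configs by inspecting key names and tensor shapes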

modules/sd_samplers.py

@@ -1,53 +1,11 @@
- from collections import namedtuple, deque
- import numpy as np
- from math import floor
- import torch
- import tqdm
- from PIL import Image
- import inspect
- import k_diffusion.sampling
- import torchsde._brownian.brownian_interval
- import ldm.models.diffusion.ddim
- import ldm.models.diffusion.plms
- from modules import prompt_parser, devices, processing, images, sd_vae_approx
- from modules.shared import opts, cmd_opts, state
- import modules.shared as shared
- from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
+ from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared
+ # imports for functions that previously were here and are used by other modules
+ from modules.sd_samplers_common import samples_to_image_grid, sample_to_image
- SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
- samplers_k_diffusion = [
- ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}),
- ('Euler', 'sample_euler', ['k_euler'], {}),
- ('LMS', 'sample_lms', ['k_lms'], {}),
- ('Heun', 'sample_heun', ['k_heun'], {}),
- ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
- ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}),
- ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
- ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
- ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}),
- ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
- ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
- ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
- ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
- ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
- ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
- ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
- ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}),
- ]
- samplers_data_k_diffusion = [
- SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
- for label, funcname, aliases, options in samplers_k_diffusion
- if hasattr(k_diffusion.sampling, funcname)
- ]
all_samplers = [
- *samplers_data_k_diffusion,
- SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
- SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
+ *sd_samplers_kdiffusion.samplers_data_k_diffusion,
+ *sd_samplers_compvis.samplers_data_compvis,
]
all_samplers_map = {x.name: x for x in all_samplers}
@@ -73,8 +31,8 @@ def create_sampler(name, model):
def set_samplers():
global samplers, samplers_for_img2img
- hidden = set(opts.hide_samplers)
- hidden_img2img = set(opts.hide_samplers + ['PLMS'])
+ hidden = set(shared.opts.hide_samplers)
+ hidden_img2img = set(shared.opts.hide_samplers + ['PLMS', 'UniPC'])
samplers = [x for x in all_samplers if x.name not in hidden]
samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
@@ -87,466 +45,3 @@ def set_samplers():
set_samplers()
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
requested_steps = (steps or p.steps)
steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
t_enc = requested_steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
return steps, t_enc
approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2}
def single_sample_to_image(sample, approximation=None):
if approximation is None:
approximation = approximation_indexes.get(opts.show_progress_type, 0)
if approximation == 2:
x_sample = sd_vae_approx.cheap_approximation(sample)
elif approximation == 1:
x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
else:
x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
return Image.fromarray(x_sample)
def sample_to_image(samples, index=0, approximation=None):
return single_sample_to_image(samples[index], approximation)
def samples_to_image_grid(samples, approximation=None):
return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
def store_latent(decoded):
state.current_latent = decoded
if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
if not shared.parallel_processing_allowed:
shared.state.assign_current_image(sample_to_image(decoded))
class InterruptedException(BaseException):
pass
class VanillaStableDiffusionSampler:
def __init__(self, constructor, sd_model):
self.sampler = constructor(sd_model)
self.is_plms = hasattr(self.sampler, 'p_sample_plms')
self.orig_p_sample_ddim = self.sampler.p_sample_plms if self.is_plms else self.sampler.p_sample_ddim
self.mask = None
self.nmask = None
self.init_latent = None
self.sampler_noises = None
self.step = 0
self.stop_at = None
self.eta = None
self.default_eta = 0.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def number_of_needed_noises(self, p):
return 0
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
if state.interrupted or state.skipped:
raise InterruptedException
if self.stop_at is not None and self.step > self.stop_at:
raise InterruptedException
# Have to unwrap the inpainting conditioning here to perform pre-processing
image_conditioning = None
if isinstance(cond, dict):
image_conditioning = cond["c_concat"][0]
cond = cond["c_crossattn"][0]
unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
cond = tensor
# for DDIM, shapes must match, we can't just process cond and uncond independently;
# filling unconditional_conditioning with repeats of the last vector to match length is
# not 100% correct but should work well enough
if unconditional_conditioning.shape[1] < cond.shape[1]:
last_vector = unconditional_conditioning[:, -1:]
last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
elif unconditional_conditioning.shape[1] > cond.shape[1]:
unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x_dec = img_orig * self.mask + self.nmask * x_dec
# Wrap the image conditioning back up since the DDIM code can accept the dict directly.
# Note that they need to be lists because it just concatenates them later.
if image_conditioning is not None:
cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
if self.mask is not None:
self.last_latent = self.init_latent * self.mask + self.nmask * res[1]
else:
self.last_latent = res[1]
store_latent(self.last_latent)
self.step += 1
state.sampling_step = self.step
shared.total_tqdm.update()
return res
def initialize(self, p):
self.eta = p.eta if p.eta is not None else opts.eta_ddim
for fieldname in ['p_sample_ddim', 'p_sample_plms']:
if hasattr(self.sampler, fieldname):
setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def adjust_steps_if_invalid(self, p, num_steps):
if (self.config.name == 'DDIM' and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS'):
valid_step = 999 / (1000 // num_steps)
if valid_step == floor(valid_step):
return int(valid_step) + 1
return num_steps
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
steps = self.adjust_steps_if_invalid(p, steps)
self.initialize(p)
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
self.last_latent = x
self.step = 0
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
self.initialize(p)
self.init_latent = None
self.last_latent = x
self.step = 0
steps = self.adjust_steps_if_invalid(p, steps or p.steps)
# Wrap the conditioning models with additional image conditioning for inpainting model
# dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape
if image_conditioning is not None:
conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]}
unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]}
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
return samples_ddim
class CFGDenoiser(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
self.mask = None
self.nmask = None
self.init_latent = None
self.step = 0
def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
denoised_uncond = x_out[-uncond.shape[0]:]
denoised = torch.clone(denoised_uncond)
for i, conds in enumerate(conds_list):
for cond_index, weight in conds:
denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
return denoised
def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
if state.interrupted or state.skipped:
raise InterruptedException
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
batch_size = len(conds_list)
repeats = [len(conds_list[i]) for i in range(batch_size)]
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps)
cfg_denoiser_callback(denoiser_params)
x_in = denoiser_params.x
image_cond_in = denoiser_params.image_cond
sigma_in = denoiser_params.sigma
if tensor.shape[1] == uncond.shape[1]:
cond_in = torch.cat([tensor, uncond])
if shared.batch_cond_uncond:
x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]})
else:
x_out = torch.zeros_like(x_in)
for batch_offset in range(0, x_out.shape[0], batch_size):
a = batch_offset
b = a + batch_size
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]})
else:
x_out = torch.zeros_like(x_in)
batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
for batch_offset in range(0, tensor.shape[0], batch_size):
a = batch_offset
b = min(a + batch_size, tensor.shape[0])
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [tensor[a:b]], "c_concat": [image_cond_in[a:b]]})
x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]})
devices.test_for_nans(x_out, "unet")
if opts.live_preview_content == "Prompt":
store_latent(x_out[0:uncond.shape[0]])
elif opts.live_preview_content == "Negative prompt":
store_latent(x_out[-uncond.shape[0]:])
denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
self.step += 1
return denoised
class TorchHijack:
def __init__(self, sampler_noises):
# Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
# implementation.
self.sampler_noises = deque(sampler_noises)
def __getattr__(self, item):
if item == 'randn_like':
return self.randn_like
if hasattr(torch, item):
return getattr(torch, item)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
def randn_like(self, x):
if self.sampler_noises:
noise = self.sampler_noises.popleft()
if noise.shape == x.shape:
return noise
if x.device.type == 'mps':
return torch.randn_like(x, device=devices.cpu).to(x.device)
else:
return torch.randn_like(x)
# MPS fix for randn in torchsde
def torchsde_randn(size, dtype, device, seed):
if device.type == 'mps':
generator = torch.Generator(devices.cpu).manual_seed(int(seed))
return torch.randn(size, dtype=dtype, device=devices.cpu, generator=generator).to(device)
else:
generator = torch.Generator(device).manual_seed(int(seed))
return torch.randn(size, dtype=dtype, device=device, generator=generator)
torchsde._brownian.brownian_interval._randn = torchsde_randn
class KDiffusionSampler:
def __init__(self, funcname, sd_model):
denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
self.stop_at = None
self.eta = None
self.default_eta = 1.0
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def callback_state(self, d):
step = d['i']
latent = d["denoised"]
if opts.live_preview_content == "Combined":
store_latent(latent)
self.last_latent = latent
if self.stop_at is not None and step > self.stop_at:
raise InterruptedException
state.sampling_step = step
shared.total_tqdm.update()
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except InterruptedException:
return self.last_latent
def number_of_needed_noises(self, p):
return p.steps
def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
self.model_wrap.step = 0
self.eta = p.eta or opts.eta_ancestral
k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
extra_params_kwargs[param_name] = getattr(p, param_name)
if 'eta' in inspect.signature(self.func).parameters:
extra_params_kwargs['eta'] = self.eta
return extra_params_kwargs
def get_sigmas(self, p, steps):
discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
discard_next_to_last_sigma = True
p.extra_generation_params["Discard penultimate sigma"] = True
steps += 1 if discard_next_to_last_sigma else 0
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
if discard_next_to_last_sigma:
sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
return sigmas
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = setup_img2img_steps(p, steps)
sigmas = self.get_sigmas(p, steps)
sigma_sched = sigmas[steps - t_enc - 1:]
xi = x + noise * sigma_sched[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
extra_params_kwargs['sigma_min'] = sigma_sched[-2]
if 'sigma_max' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_max'] = sigma_sched[0]
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = len(sigma_sched) - 1
if 'sigma_sched' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_sched'] = sigma_sched
if 'sigmas' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigmas'] = sigma_sched
self.model_wrap_cfg.init_latent = x
self.last_latent = x
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning = None):
steps = steps or p.steps
sigmas = self.get_sigmas(p, steps)
x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
if 'sigma_min' in inspect.signature(self.func).parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
if 'n' in inspect.signature(self.func).parameters:
extra_params_kwargs['n'] = steps
else:
extra_params_kwargs['sigmas'] = sigmas
self.last_latent = x
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples

modules/sd_samplers_common.py (new file)

@@ -0,0 +1,62 @@
from collections import namedtuple
import numpy as np
import torch
from PIL import Image
from modules import devices, processing, images, sd_vae_approx
from modules.shared import opts, state
import modules.shared as shared
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
def setup_img2img_steps(p, steps=None):
if opts.img2img_fix_steps or steps is not None:
requested_steps = (steps or p.steps)
steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
t_enc = requested_steps - 1
else:
steps = p.steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)
return steps, t_enc
approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2}
def single_sample_to_image(sample, approximation=None):
if approximation is None:
approximation = approximation_indexes.get(opts.show_progress_type, 0)
if approximation == 2:
x_sample = sd_vae_approx.cheap_approximation(sample)
elif approximation == 1:
x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype).unsqueeze(0))[0].detach()
else:
x_sample = processing.decode_first_stage(shared.sd_model, sample.unsqueeze(0))[0]
x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
return Image.fromarray(x_sample)
def sample_to_image(samples, index=0, approximation=None):
return single_sample_to_image(samples[index], approximation)
def samples_to_image_grid(samples, approximation=None):
return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
def store_latent(decoded):
state.current_latent = decoded
if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
if not shared.parallel_processing_allowed:
shared.state.assign_current_image(sample_to_image(decoded))
class InterruptedException(BaseException):
pass
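A worked example of the step accounting in setup_img2img_steps above (numbers chosen for illustration; P stands in for the real processing object):

class P:  # stand-in for the processing object, illustration only
    steps = 20
    denoising_strength = 0.6

p = P()

# default behaviour (opts.img2img_fix_steps disabled, no explicit steps):
steps = p.steps                                         # 20 scheduled steps
t_enc = int(min(p.denoising_strength, 0.999) * steps)   # int(0.6 * 20) = 12 actually denoised

# with opts.img2img_fix_steps enabled (or steps passed explicitly):
requested = p.steps
steps = int(requested / min(p.denoising_strength, 0.999))  # int(20 / 0.6) = 33 scheduled steps
t_enc = requested - 1                                       # 19, so ~20 steps are actually run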

modules/sd_samplers_compvis.py (new file)

@@ -0,0 +1,220 @@
import math
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms
import numpy as np
import torch
from modules.shared import state
from modules import sd_samplers_common, prompt_parser, shared
import modules.models.diffusion.uni_pc
samplers_data_compvis = [
sd_samplers_common.SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
sd_samplers_common.SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
sd_samplers_common.SamplerData('UniPC', lambda model: VanillaStableDiffusionSampler(modules.models.diffusion.uni_pc.UniPCSampler, model), [], {}),
]
class VanillaStableDiffusionSampler:
def __init__(self, constructor, sd_model):
self.sampler = constructor(sd_model)
self.is_ddim = hasattr(self.sampler, 'p_sample_ddim')
self.is_plms = hasattr(self.sampler, 'p_sample_plms')
self.is_unipc = isinstance(self.sampler, modules.models.diffusion.uni_pc.UniPCSampler)
self.orig_p_sample_ddim = None
if self.is_plms:
self.orig_p_sample_ddim = self.sampler.p_sample_plms
elif self.is_ddim:
self.orig_p_sample_ddim = self.sampler.p_sample_ddim
self.mask = None
self.nmask = None
self.init_latent = None
self.sampler_noises = None
self.step = 0
self.stop_at = None
self.eta = None
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def number_of_needed_noises(self, p):
return 0
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except sd_samplers_common.InterruptedException:
return self.last_latent
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
x_dec, ts, cond, unconditional_conditioning = self.before_sample(x_dec, ts, cond, unconditional_conditioning)
res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
x_dec, ts, cond, unconditional_conditioning, res = self.after_sample(x_dec, ts, cond, unconditional_conditioning, res)
return res
def before_sample(self, x, ts, cond, unconditional_conditioning):
if state.interrupted or state.skipped:
raise sd_samplers_common.InterruptedException
if self.stop_at is not None and self.step > self.stop_at:
raise sd_samplers_common.InterruptedException
# Have to unwrap the inpainting conditioning here to perform pre-processing
image_conditioning = None
uc_image_conditioning = None
if isinstance(cond, dict):
if self.conditioning_key == "crossattn-adm":
image_conditioning = cond["c_adm"]
uc_image_conditioning = unconditional_conditioning["c_adm"]
else:
image_conditioning = cond["c_concat"][0]
cond = cond["c_crossattn"][0]
unconditional_conditioning = unconditional_conditioning["c_crossattn"][0]
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
cond = tensor
# for DDIM, shapes must match, we can't just process cond and uncond independently;
# filling unconditional_conditioning with repeats of the last vector to match length is
# not 100% correct but should work well enough
if unconditional_conditioning.shape[1] < cond.shape[1]:
last_vector = unconditional_conditioning[:, -1:]
last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
elif unconditional_conditioning.shape[1] > cond.shape[1]:
unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x = img_orig * self.mask + self.nmask * x
# Wrap the image conditioning back up since the DDIM code can accept the dict directly.
# Note that they need to be lists because it just concatenates them later.
if image_conditioning is not None:
if self.conditioning_key == "crossattn-adm":
cond = {"c_adm": image_conditioning, "c_crossattn": [cond]}
unconditional_conditioning = {"c_adm": uc_image_conditioning, "c_crossattn": [unconditional_conditioning]}
else:
cond = {"c_concat": [image_conditioning], "c_crossattn": [cond]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
return x, ts, cond, unconditional_conditioning
def update_step(self, last_latent):
if self.mask is not None:
self.last_latent = self.init_latent * self.mask + self.nmask * last_latent
else:
self.last_latent = last_latent
sd_samplers_common.store_latent(self.last_latent)
self.step += 1
state.sampling_step = self.step
shared.total_tqdm.update()
def after_sample(self, x, ts, cond, uncond, res):
if not self.is_unipc:
self.update_step(res[1])
return x, ts, cond, uncond, res
def unipc_after_update(self, x, model_x):
self.update_step(x)
def initialize(self, p):
self.eta = p.eta if p.eta is not None else shared.opts.eta_ddim
if self.eta != 0.0:
p.extra_generation_params["Eta DDIM"] = self.eta
if self.is_unipc:
keys = [
('UniPC variant', 'uni_pc_variant'),
('UniPC skip type', 'uni_pc_skip_type'),
('UniPC order', 'uni_pc_order'),
('UniPC lower order final', 'uni_pc_lower_order_final'),
]
for name, key in keys:
v = getattr(shared.opts, key)
if v != shared.opts.get_default(key):
p.extra_generation_params[name] = v
for fieldname in ['p_sample_ddim', 'p_sample_plms']:
if hasattr(self.sampler, fieldname):
setattr(self.sampler, fieldname, self.p_sample_ddim_hook)
if self.is_unipc:
self.sampler.set_hooks(lambda x, t, c, u: self.before_sample(x, t, c, u), lambda x, t, c, u, r: self.after_sample(x, t, c, u, r), lambda x, mx: self.unipc_after_update(x, mx))
self.mask = p.mask if hasattr(p, 'mask') else None
self.nmask = p.nmask if hasattr(p, 'nmask') else None
def adjust_steps_if_invalid(self, p, num_steps):
if ((self.config.name == 'DDIM') and p.ddim_discretize == 'uniform') or (self.config.name == 'PLMS') or (self.config.name == 'UniPC'):
if self.config.name == 'UniPC' and num_steps < shared.opts.uni_pc_order:
num_steps = shared.opts.uni_pc_order
valid_step = 999 / (1000 // num_steps)
if valid_step == math.floor(valid_step):
return int(valid_step) + 1
return num_steps
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
steps = self.adjust_steps_if_invalid(p, steps)
self.initialize(p)
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
x1 = self.sampler.stochastic_encode(x, torch.tensor([t_enc] * int(x.shape[0])).to(shared.device), noise=noise)
self.init_latent = x
self.last_latent = x
self.step = 0
# Wrap the conditioning models with additional image conditioning for inpainting model
if image_conditioning is not None:
if self.conditioning_key == "crossattn-adm":
conditioning = {"c_adm": image_conditioning, "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_adm": torch.zeros_like(image_conditioning), "c_crossattn": [unconditional_conditioning]}
else:
conditioning = {"c_concat": [image_conditioning], "c_crossattn": [conditioning]}
unconditional_conditioning = {"c_concat": [image_conditioning], "c_crossattn": [unconditional_conditioning]}
samples = self.launch_sampling(t_enc + 1, lambda: self.sampler.decode(x1, conditioning, t_enc, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
self.initialize(p)
self.init_latent = None
self.last_latent = x
self.step = 0
steps = self.adjust_steps_if_invalid(p, steps or p.steps)
# Wrap the conditioning models with additional image conditioning for inpainting model
# dummy_for_plms is needed because PLMS code checks the first item in the dict to have the right shape
if image_conditioning is not None:
if self.conditioning_key == "crossattn-adm":
conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_adm": image_conditioning}
unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_adm": torch.zeros_like(image_conditioning)}
else:
conditioning = {"dummy_for_plms": np.zeros((conditioning.shape[0],)), "c_crossattn": [conditioning], "c_concat": [image_conditioning]}
unconditional_conditioning = {"c_crossattn": [unconditional_conditioning], "c_concat": [image_conditioning]}
samples_ddim = self.launch_sampling(steps, lambda: self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)[0])
return samples_ddim
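The arithmetic in adjust_steps_if_invalid above only changes the step count when 999 divides evenly by 1000 // num_steps; a few illustrative values:

import math

# illustration of adjust_steps_if_invalid for DDIM (uniform), PLMS and UniPC
for num_steps in (20, 25, 27):
    valid_step = 999 / (1000 // num_steps)
    if valid_step == math.floor(valid_step):
        num_steps = int(valid_step) + 1
    print(num_steps)
# 20 -> 999 / 50 = 19.98, not integral, stays 20
# 25 -> 999 / 40 = 24.975, not integral, stays 25
# 27 -> 999 / 37 = 27.0, integral, bumped to 28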

modules/sd_samplers_kdiffusion.py (new file)

@@ -0,0 +1,366 @@
from collections import deque
import torch
import inspect
import einops
import k_diffusion.sampling
from modules import prompt_parser, devices, sd_samplers_common
from modules.shared import opts, state
import modules.shared as shared
from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
samplers_k_diffusion = [
('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}),
('Euler', 'sample_euler', ['k_euler'], {}),
('LMS', 'sample_lms', ['k_lms'], {}),
('Heun', 'sample_heun', ['k_heun'], {}),
('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}),
('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}),
('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}),
('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}),
('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}),
('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}),
]
samplers_data_k_diffusion = [
sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
for label, funcname, aliases, options in samplers_k_diffusion
if hasattr(k_diffusion.sampling, funcname)
]
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
class CFGDenoiser(torch.nn.Module):
"""
Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet)
that can take a noisy picture and produce a noise-free picture using two guidances (prompts)
instead of one. Originally, the second prompt is just an empty string, but we use non-empty
negative prompt.
"""
def __init__(self, model):
super().__init__()
self.inner_model = model
self.mask = None
self.nmask = None
self.init_latent = None
self.step = 0
self.image_cfg_scale = None
def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
denoised_uncond = x_out[-uncond.shape[0]:]
denoised = torch.clone(denoised_uncond)
for i, conds in enumerate(conds_list):
for cond_index, weight in conds:
denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
return denoised
def combine_denoised_for_edit_model(self, x_out, cond_scale):
out_cond, out_img_cond, out_uncond = x_out.chunk(3)
denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond)
return denoised
def forward(self, x, sigma, uncond, cond, cond_scale, image_cond):
if state.interrupted or state.skipped:
raise sd_samplers_common.InterruptedException
# at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling,
# so is_edit_model is set to False to support AND composition.
is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
assert not is_edit_model or all([len(conds) == 1 for conds in conds_list]), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
batch_size = len(conds_list)
repeats = [len(conds_list[i]) for i in range(batch_size)]
if shared.sd_model.model.conditioning_key == "crossattn-adm":
image_uncond = torch.zeros_like(image_cond)
make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": c_crossattn, "c_adm": c_adm}
else:
image_uncond = image_cond
make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": c_crossattn, "c_concat": [c_concat]}
if not is_edit_model:
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond])
else:
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma])
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)])
denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond)
cfg_denoiser_callback(denoiser_params)
x_in = denoiser_params.x
image_cond_in = denoiser_params.image_cond
sigma_in = denoiser_params.sigma
tensor = denoiser_params.text_cond
uncond = denoiser_params.text_uncond
if tensor.shape[1] == uncond.shape[1]:
if not is_edit_model:
cond_in = torch.cat([tensor, uncond])
else:
cond_in = torch.cat([tensor, uncond, uncond])
if shared.batch_cond_uncond:
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict([cond_in], image_cond_in))
else:
x_out = torch.zeros_like(x_in)
for batch_offset in range(0, x_out.shape[0], batch_size):
a = batch_offset
b = a + batch_size
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict([cond_in[a:b]], image_cond_in[a:b]))
else:
x_out = torch.zeros_like(x_in)
batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
for batch_offset in range(0, tensor.shape[0], batch_size):
a = batch_offset
b = min(a + batch_size, tensor.shape[0])
if not is_edit_model:
c_crossattn = [tensor[a:b]]
else:
c_crossattn = torch.cat([tensor[a:b]], uncond)
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:]))
denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps)
cfg_denoised_callback(denoised_params)
devices.test_for_nans(x_out, "unet")
if opts.live_preview_content == "Prompt":
sd_samplers_common.store_latent(x_out[0:uncond.shape[0]])
elif opts.live_preview_content == "Negative prompt":
sd_samplers_common.store_latent(x_out[-uncond.shape[0]:])
if not is_edit_model:
denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
else:
denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
self.step += 1
return denoised
class TorchHijack:
def __init__(self, sampler_noises):
# Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
# implementation.
self.sampler_noises = deque(sampler_noises)
def __getattr__(self, item):
if item == 'randn_like':
return self.randn_like
if hasattr(torch, item):
return getattr(torch, item)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item))
def randn_like(self, x):
if self.sampler_noises:
noise = self.sampler_noises.popleft()
if noise.shape == x.shape:
return noise
if x.device.type == 'mps':
return torch.randn_like(x, device=devices.cpu).to(x.device)
else:
return torch.randn_like(x)
class KDiffusionSampler:
def __init__(self, funcname, sd_model):
denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser
self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization)
self.funcname = funcname
self.func = getattr(k_diffusion.sampling, self.funcname)
self.extra_params = sampler_extra_params.get(funcname, [])
self.model_wrap_cfg = CFGDenoiser(self.model_wrap)
self.sampler_noises = None
self.stop_at = None
self.eta = None
self.config = None
self.last_latent = None
self.conditioning_key = sd_model.model.conditioning_key
def callback_state(self, d):
step = d['i']
latent = d["denoised"]
if opts.live_preview_content == "Combined":
sd_samplers_common.store_latent(latent)
self.last_latent = latent
if self.stop_at is not None and step > self.stop_at:
raise sd_samplers_common.InterruptedException
state.sampling_step = step
shared.total_tqdm.update()
def launch_sampling(self, steps, func):
state.sampling_steps = steps
state.sampling_step = 0
try:
return func()
except sd_samplers_common.InterruptedException:
return self.last_latent
def number_of_needed_noises(self, p):
return p.steps
def initialize(self, p):
self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
self.model_wrap_cfg.step = 0
self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
self.eta = p.eta if p.eta is not None else opts.eta_ancestral
k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
extra_params_kwargs[param_name] = getattr(p, param_name)
if 'eta' in inspect.signature(self.func).parameters:
if self.eta != 1.0:
p.extra_generation_params["Eta"] = self.eta
extra_params_kwargs['eta'] = self.eta
return extra_params_kwargs
def get_sigmas(self, p, steps):
discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)
if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma:
discard_next_to_last_sigma = True
p.extra_generation_params["Discard penultimate sigma"] = True
steps += 1 if discard_next_to_last_sigma else 0
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
if discard_next_to_last_sigma:
sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
return sigmas
def create_noise_sampler(self, x, sigmas, p):
"""For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes"""
if shared.opts.no_dpmpp_sde_batch_determinism:
return None
from k_diffusion.sampling import BrownianTreeNoiseSampler
sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size]
return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds)
def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps)
sigmas = self.get_sigmas(p, steps)
sigma_sched = sigmas[steps - t_enc - 1:]
xi = x + noise * sigma_sched[0]
extra_params_kwargs = self.initialize(p)
parameters = inspect.signature(self.func).parameters
if 'sigma_min' in parameters:
## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last
extra_params_kwargs['sigma_min'] = sigma_sched[-2]
if 'sigma_max' in parameters:
extra_params_kwargs['sigma_max'] = sigma_sched[0]
if 'n' in parameters:
extra_params_kwargs['n'] = len(sigma_sched) - 1
if 'sigma_sched' in parameters:
extra_params_kwargs['sigma_sched'] = sigma_sched
if 'sigmas' in parameters:
extra_params_kwargs['sigmas'] = sigma_sched
if self.funcname == 'sample_dpmpp_sde':
noise_sampler = self.create_noise_sampler(x, sigmas, p)
extra_params_kwargs['noise_sampler'] = noise_sampler
self.model_wrap_cfg.init_latent = x
self.last_latent = x
extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale,
}
samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None):
steps = steps or p.steps
sigmas = self.get_sigmas(p, steps)
x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
parameters = inspect.signature(self.func).parameters
if 'sigma_min' in parameters:
extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item()
extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item()
if 'n' in parameters:
extra_params_kwargs['n'] = steps
else:
extra_params_kwargs['sigmas'] = sigmas
if self.funcname == 'sample_dpmpp_sde':
noise_sampler = self.create_noise_sampler(x, sigmas, p)
extra_params_kwargs['noise_sampler'] = noise_sampler
self.last_latent = x
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
'cond': conditioning,
'image_cond': image_conditioning,
'uncond': unconditional_conditioning,
'cond_scale': p.cfg_scale
}, disable=False, callback=self.callback_state, **extra_params_kwargs))
return samples
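For a single prompt with weight 1.0, combine_denoised above reduces to the usual classifier-free guidance mix uncond + cond_scale * (cond - uncond); a toy numeric check (scalars instead of real latents, for illustration only):

import torch

x_out = torch.tensor([[2.0], [0.5]])   # row 0: cond prediction, row 1: uncond prediction
conds_list = [[(0, 1.0)]]              # one image, one cond at index 0 with weight 1.0
uncond = torch.zeros((1, 1))           # only its batch size matters below
cond_scale = 7.0

denoised_uncond = x_out[-uncond.shape[0]:]   # [[0.5]]
denoised = torch.clone(denoised_uncond)
for i, conds in enumerate(conds_list):
    for cond_index, weight in conds:
        denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)

print(denoised.item())  # 0.5 + 7.0 * (2.0 - 0.5) = 11.0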

modules/sd_vae.py

@@ -3,13 +3,12 @@ import safetensors.torch
import os
import collections
from collections import namedtuple
- from modules import shared, devices, script_callbacks, sd_models
- from modules.paths import models_path
+ from modules import paths, shared, devices, script_callbacks, sd_models
import glob
from copy import deepcopy
- vae_path = os.path.abspath(os.path.join(models_path, "VAE"))
+ vae_path = os.path.abspath(os.path.join(paths.models_path, "VAE"))
vae_ignore_keys = {"model_ema.decay", "model_ema.num_updates"}
vae_dict = {}

modules/sd_vae_approx.py

@@ -35,8 +35,11 @@ def model():
global sd_vae_approx_model
if sd_vae_approx_model is None:
+ model_path = os.path.join(paths.models_path, "VAE-approx", "model.pt")
sd_vae_approx_model = VAEApprox()
- sd_vae_approx_model.load_state_dict(torch.load(os.path.join(paths.models_path, "VAE-approx", "model.pt"), map_location='cpu' if devices.device.type != 'cuda' else None))
+ if not os.path.exists(model_path):
+ model_path = os.path.join(paths.script_path, "models", "VAE-approx", "model.pt")
+ sd_vae_approx_model.load_state_dict(torch.load(model_path, map_location='cpu' if devices.device.type != 'cuda' else None))
sd_vae_approx_model.eval()
sd_vae_approx_model.to(devices.device, devices.dtype)

modules/shared.py

@@ -13,101 +13,21 @@ import modules.interrogate
import modules.memmon
import modules.styles
import modules.devices as devices
- from modules import localization, sd_vae, extensions, script_loading, errors, ui_components
- from modules.paths import models_path, script_path, sd_path
+ from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args
+ from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir
demo = None
- sd_default_config = os.path.join(script_path, "configs/v1-inference.yaml")
- sd_model_file = os.path.join(script_path, 'model.ckpt')
- default_sd_model_file = sd_model_file
- parser = argparse.ArgumentParser()
- parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
+ parser = cmd_args.parser
+ script_loading.preload_extensions(extensions_dir, parser)
+ script_loading.preload_extensions(extensions_builtin_dir, parser)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files")
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates")
parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
parser.add_argument("--always-batch-cond-uncond", action='store_true', help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
parser.add_argument("--ngrok-region", type=str, help="The region in which ngrok should start.", default="us")
parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.")
parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization")
parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. By default, it's on when cuda is unavailable.")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(script_path, 'ui-config.json'))
parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None)
parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)")
parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
parser.add_argument("--api-log", action='store_true', help="use api-log=True to enable logging of all API requests")
parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the API instead of the webui")
parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origin(s) in the form of a comma-separated list (no spaces)", default=None)
parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS origin(s) in the form of a single regular expression", default=None)
parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
parser.add_argument("--gradio-queue", action='store_true', help="Uses gradio queue; experimental option; breaks restart UI button")
if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
cmd_opts = parser.parse_args()
else:
cmd_opts, _ = parser.parse_known_args()
script_loading.preload_extensions(extensions.extensions_dir, parser)
script_loading.preload_extensions(extensions.extensions_builtin_dir, parser)
cmd_opts = parser.parse_args()
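For reference, a small sketch of how the flags defined above are consumed; the sample command line is illustrative only, but the flags themselves are the real ones declared above.
# illustrative only: feed a sample command line through the parser defined above
import shlex
sample_argv = shlex.split("--medvram --listen --port 7861 --api")
sample_opts, _ = parser.parse_known_args(sample_argv)
print(sample_opts.medvram, sample_opts.listen, sample_opts.port, sample_opts.api)  # True True 7861 True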
restricted_opts = { restricted_opts = {
"samples_filename_pattern", "samples_filename_pattern",
@ -124,12 +44,13 @@ restricted_opts = {
ui_reorder_categories = [ ui_reorder_categories = [
"inpaint", "inpaint",
"sampler", "sampler",
"checkboxes",
"hires_fix",
"dimensions", "dimensions",
"cfg", "cfg",
"seed", "seed",
"checkboxes",
"hires_fix",
"batch", "batch",
"override_settings",
"scripts", "scripts",
] ]
@ -263,12 +184,6 @@ interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = [] face_restorers = []
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
class OptionInfo: class OptionInfo:
def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None): def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None):
self.default = default self.default = default
@ -303,6 +218,7 @@ def list_samplers():
hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config}
tab_names = []
options_templates = {} options_templates = {}
@ -324,10 +240,16 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."), "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
"save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."), "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
"save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
"jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
"export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"), "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
"export_for_4chan": OptionInfo(True, "If the saved image file size is above the limit, or its either width or height are above the limit, save a downscaled copy as JPG"),
"img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
"target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
"img_max_size_mp": OptionInfo(200, "Maximum image size, in megapixels", gr.Number),
"use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"), "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
"use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
"save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
"do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"),
@ -349,22 +271,22 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), {
})) }))
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"), "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
"grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"), "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
"use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern", component_args=hide_dirs), "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}), "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
})) }))
options_templates.update(options_section(('upscaling', "Upscaling"), { options_templates.update(options_section(('upscaling', "Upscaling"), {
"ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}), "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
"ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}), "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}), "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
"upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
})) }))
options_templates.update(options_section(('face-restoration', "Face restoration"), { options_templates.update(options_section(('face-restoration', "Face restoration"), {
"face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}), "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
"code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}), "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
"face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"), "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
})) }))
@ -396,7 +318,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints),
"sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": ["Automatic", "None"] + list(sd_vae.vae_dict)}, refresh=sd_vae.refresh_vae_list), "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list),
"sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), "sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}), "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
@ -408,12 +330,13 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }),
"CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}),
"extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
})) }))
options_templates.update(options_section(('compatibility', "Compatibility"), { options_templates.update(options_section(('compatibility', "Compatibility"), {
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
"no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
"use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."), "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
})) }))
@ -433,15 +356,22 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
})) }))
options_templates.update(options_section(('extra_networks', "Extra Networks"), { options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, { "choices": ["cards", "thumbs"] }), "extra_networks_default_view": OptionInfo("cards", "Default view for Extra Networks", gr.Dropdown, {"choices": ["cards", "thumbs"]}),
"extra_networks_default_multiplier": OptionInfo(1.0, "Multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"),
"extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"),
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": [""] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
})) }))
options_templates.update(options_section(('ui', "User interface"), { options_templates.update(options_section(('ui', "User interface"), {
"return_grid": OptionInfo(True, "Show grid in results for web"), "return_grid": OptionInfo(True, "Show grid in results for web"),
"return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
"return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_model_name_to_info": OptionInfo(True, "Add model name to generation information"), "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
"disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."), "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
"send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"), "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
"send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"), "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
"font": OptionInfo("", "Font for image grids that have text"), "font": OptionInfo("", "Font for image grids that have text"),
@ -453,6 +383,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}), "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"), "quicksettings": OptionInfo("sd_model_checkpoint", "Quicksettings list"),
"hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}),
"ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
"ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"),
"localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
@ -478,15 +409,21 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),
'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma"), 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma"),
'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}),
'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}),
'uni_pc_order': OptionInfo(3, "UniPC order (must be < sampling steps)", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}),
'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final"),
})) }))
options_templates.update(options_section(('postprocessing', "Postprocessing"), { options_templates.update(options_section(('postprocessing', "Postprocessing"), {
'postprocessing_scipts_order': OptionInfo("upscale, gfpgan, codeformer", "Postprocessing operation order"), 'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), 'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
})) }))
options_templates.update(options_section((None, "Hidden options"), { options_templates.update(options_section((None, "Hidden options"), {
"disabled_extensions": OptionInfo([], "Disable those extensions"), "disabled_extensions": OptionInfo([], "Disable these extensions"),
"disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}),
"sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"), "sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
})) }))
@ -551,6 +488,15 @@ class Options:
return True return True
def get_default(self, key):
"""returns the default value for the key"""
data_label = self.data_labels.get(key)
if data_label is None:
return None
return data_label.default
def save(self, filename): def save(self, filename):
assert not cmd_opts.freeze_settings, "saving settings is disabled" assert not cmd_opts.freeze_settings, "saving settings is disabled"
@ -605,11 +551,37 @@ class Options:
self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])} self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
def cast_value(self, key, value):
"""casts an arbitrary to the same type as this setting's value with key
Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
"""
if value is None:
return None
default_value = self.data_labels[key].default
if default_value is None:
default_value = getattr(self, key, None)
if default_value is None:
return None
expected_type = type(default_value)
if expected_type == bool and value == "False":
value = False
else:
value = expected_type(value)
return value
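A small usage sketch for the new get_default and cast_value helpers, assuming the opts instance created just below; the expected values follow from the defaults defined above.
# illustrative usage of the new helpers (assumes the `opts` instance created below)
print(opts.get_default("jpeg_quality"))                  # 80
print(opts.cast_value("eta_noise_seed_delta", "12"))     # 12 (int, not str)
print(opts.cast_value("grid_save_to_dirs", "False"))     # False (bool, not str)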
opts = Options() opts = Options()
if os.path.exists(config_filename): if os.path.exists(config_filename):
opts.load(config_filename) opts.load(config_filename)
settings_components = None
"""assinged from ui.py, a mapping on setting anmes to gradio components repsponsible for those settings"""
latent_upscale_default_mode = "Latent" latent_upscale_default_mode = "Latent"
latent_upscale_modes = { latent_upscale_modes = {
"Latent": {"mode": "bilinear", "antialias": False}, "Latent": {"mode": "bilinear", "antialias": False},
@ -657,6 +629,7 @@ class TotalTQDM:
def clear(self): def clear(self):
if self._tqdm is not None: if self._tqdm is not None:
self._tqdm.refresh()
self._tqdm.close() self._tqdm.close()
self._tqdm = None self._tqdm = None
@ -668,7 +641,7 @@ mem_mon.start()
def listfiles(dirname): def listfiles(dirname):
filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname)) if not x.startswith(".")] filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=str.lower) if not x.startswith(".")]
return [file for file in filenames if os.path.isfile(file)] return [file for file in filenames if os.path.isfile(file)]

23
modules/shared_items.py Normal file
View File

@ -0,0 +1,23 @@
def realesrgan_models_names():
import modules.realesrgan_model
return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)]
def postprocessing_scripts():
import modules.scripts
return modules.scripts.scripts_postproc.scripts
def sd_vae_items():
import modules.sd_vae
return ["Automatic", "None"] + list(modules.sd_vae.vae_dict)
def refresh_vae_list():
import modules.sd_vae
modules.sd_vae.refresh_vae_list()

View File

@ -67,7 +67,7 @@ def _summarize_chunk(
max_score, _ = torch.max(attn_weights, -1, keepdim=True) max_score, _ = torch.max(attn_weights, -1, keepdim=True)
max_score = max_score.detach() max_score = max_score.detach()
exp_weights = torch.exp(attn_weights - max_score) exp_weights = torch.exp(attn_weights - max_score)
exp_values = torch.bmm(exp_weights, value) exp_values = torch.bmm(exp_weights, value) if query.device.type == 'mps' else torch.bmm(exp_weights, value.to(exp_weights.dtype)).to(value.dtype)
max_score = max_score.squeeze(-1) max_score = max_score.squeeze(-1)
return AttnChunk(exp_values, exp_weights.sum(dim=-1), max_score) return AttnChunk(exp_values, exp_weights.sum(dim=-1), max_score)
@ -129,7 +129,7 @@ def _get_attention_scores_no_kv_chunking(
) )
attn_probs = attn_scores.softmax(dim=-1) attn_probs = attn_scores.softmax(dim=-1)
del attn_scores del attn_scores
hidden_states_slice = torch.bmm(attn_probs, value) hidden_states_slice = torch.bmm(attn_probs, value) if query.device.type == 'mps' else torch.bmm(attn_probs, value.to(attn_probs.dtype)).to(value.dtype)
return hidden_states_slice return hidden_states_slice
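The change above keeps the direct bmm on Apple's MPS backend and otherwise casts value to the attention weights' dtype before the matmul, restoring the original dtype afterwards. A standalone sketch of that pattern; it checks the value tensor's device purely for illustration, whereas the webui code checks query.device.
import torch

def bmm_matching_dtype(attn_probs, value):
    # on mps, the mixed-dtype bmm is left as-is; elsewhere, compute in attn_probs' dtype and cast back
    if value.device.type == 'mps':
        return torch.bmm(attn_probs, value)
    return torch.bmm(attn_probs, value.to(attn_probs.dtype)).to(value.dtype)

attn_probs = torch.rand(1, 4, 4, dtype=torch.float32)
value = torch.rand(1, 4, 8, dtype=torch.float16)
out = bmm_matching_dtype(attn_probs, value)  # float16 result, computed in float32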

View File

@ -19,9 +19,10 @@ re_numbers_at_start = re.compile(r"^[-\d]+\s*")
class DatasetEntry: class DatasetEntry:
def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None): def __init__(self, filename=None, filename_text=None, latent_dist=None, latent_sample=None, cond=None, cond_text=None, pixel_values=None, weight=None):
self.filename = filename self.filename = filename
self.filename_text = filename_text self.filename_text = filename_text
self.weight = weight
self.latent_dist = latent_dist self.latent_dist = latent_dist
self.latent_sample = latent_sample self.latent_sample = latent_sample
self.cond = cond self.cond = cond
@ -30,7 +31,7 @@ class DatasetEntry:
class PersonalizedBase(Dataset): class PersonalizedBase(Dataset):
def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False): def __init__(self, data_root, width, height, repeats, flip_p=0.5, placeholder_token="*", model=None, cond_model=None, device=None, template_file=None, include_cond=False, batch_size=1, gradient_step=1, shuffle_tags=False, tag_drop_out=0, latent_sampling_method='once', varsize=False, use_weight=False):
re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None re_word = re.compile(shared.opts.dataset_filename_word_regex) if len(shared.opts.dataset_filename_word_regex) > 0 else None
self.placeholder_token = placeholder_token self.placeholder_token = placeholder_token
@ -56,10 +57,16 @@ class PersonalizedBase(Dataset):
print("Preparing dataset...") print("Preparing dataset...")
for path in tqdm.tqdm(self.image_paths): for path in tqdm.tqdm(self.image_paths):
alpha_channel = None
if shared.state.interrupted: if shared.state.interrupted:
raise Exception("interrupted") raise Exception("interrupted")
try: try:
image = Image.open(path).convert('RGB') image = Image.open(path)
#Currently does not work for single-color transparency;
#we would need to read image.info['transparency'] for that
if use_weight and 'A' in image.getbands():
alpha_channel = image.getchannel('A')
image = image.convert('RGB')
if not varsize: if not varsize:
image = image.resize((width, height), PIL.Image.BICUBIC) image = image.resize((width, height), PIL.Image.BICUBIC)
except Exception: except Exception:
@ -87,17 +94,35 @@ class PersonalizedBase(Dataset):
with devices.autocast(): with devices.autocast():
latent_dist = model.encode_first_stage(torchdata.unsqueeze(dim=0)) latent_dist = model.encode_first_stage(torchdata.unsqueeze(dim=0))
if latent_sampling_method == "once" or (latent_sampling_method == "deterministic" and not isinstance(latent_dist, DiagonalGaussianDistribution)): #Perform latent sampling, even for random sampling.
latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) #We need the sample dimensions for the weights
latent_sampling_method = "once" if latent_sampling_method == "deterministic":
entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample) if isinstance(latent_dist, DiagonalGaussianDistribution):
elif latent_sampling_method == "deterministic": # Works only for DiagonalGaussianDistribution
# Works only for DiagonalGaussianDistribution latent_dist.std = 0
latent_dist.std = 0 else:
latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu) latent_sampling_method = "once"
entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample) latent_sample = model.get_first_stage_encoding(latent_dist).squeeze().to(devices.cpu)
elif latent_sampling_method == "random":
entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist) if use_weight and alpha_channel is not None:
channels, *latent_size = latent_sample.shape
weight_img = alpha_channel.resize(latent_size)
npweight = np.array(weight_img).astype(np.float32)
#Repeat for every channel in the latent sample
weight = torch.tensor([npweight] * channels).reshape([channels] + latent_size)
#Normalize the weight to a minimum of 0 and a mean of 1, that way the loss will be comparable to default.
weight -= weight.min()
weight /= weight.mean()
elif use_weight:
#If an image does not have an alpha channel, add an all-ones weight map anyway so we can stack it later
weight = torch.ones(latent_sample.shape)
else:
weight = None
if latent_sampling_method == "random":
entry = DatasetEntry(filename=path, filename_text=filename_text, latent_dist=latent_dist, weight=weight)
else:
entry = DatasetEntry(filename=path, filename_text=filename_text, latent_sample=latent_sample, weight=weight)
if not (self.tag_drop_out != 0 or self.shuffle_tags): if not (self.tag_drop_out != 0 or self.shuffle_tags):
entry.cond_text = self.create_text(filename_text) entry.cond_text = self.create_text(filename_text)
@ -110,6 +135,7 @@ class PersonalizedBase(Dataset):
del torchdata del torchdata
del latent_dist del latent_dist
del latent_sample del latent_sample
del weight
self.length = len(self.dataset) self.length = len(self.dataset)
self.groups = list(groups.values()) self.groups = list(groups.values())
@ -195,6 +221,10 @@ class BatchLoader:
self.cond_text = [entry.cond_text for entry in data] self.cond_text = [entry.cond_text for entry in data]
self.cond = [entry.cond for entry in data] self.cond = [entry.cond for entry in data]
self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1) self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1)
if all(entry.weight is not None for entry in data):
self.weight = torch.stack([entry.weight for entry in data]).squeeze(1)
else:
self.weight = None
#self.emb_index = [entry.emb_index for entry in data] #self.emb_index = [entry.emb_index for entry in data]
#print(self.latent_sample.device) #print(self.latent_sample.device)
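A self-contained sketch of the alpha-channel weighting added above: the alpha channel is resized to the latent resolution, repeated per latent channel, and normalized to a minimum of 0 and a mean of 1 so the weighted loss stays comparable to the unweighted one. The gradient image, latent size, and channel count below are illustrative stand-ins.
import numpy as np
import torch
from PIL import Image

# illustrative stand-ins: a horizontal-gradient alpha channel and a 4x64x64 latent
alpha = np.tile(np.linspace(0, 255, 512, dtype=np.uint8), (512, 1))
alpha_channel = Image.fromarray(alpha, mode='L')
channels, latent_size = 4, [64, 64]

weight_img = alpha_channel.resize(latent_size)
npweight = np.array(weight_img).astype(np.float32)
weight = torch.tensor([npweight] * channels).reshape([channels] + latent_size)
weight -= weight.min()   # minimum becomes 0
weight /= weight.mean()  # mean becomes 1
print(weight.shape, float(weight.min()), float(weight.mean()))  # torch.Size([4, 64, 64]), ~0.0, ~1.0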

View File

@ -6,8 +6,7 @@ import sys
import tqdm import tqdm
import time import time
from modules import shared, images, deepbooru from modules import paths, shared, images, deepbooru
from modules.paths import models_path
from modules.shared import opts, cmd_opts from modules.shared import opts, cmd_opts
from modules.textual_inversion import autocrop from modules.textual_inversion import autocrop
@ -199,7 +198,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
dnn_model_path = None dnn_model_path = None
try: try:
dnn_model_path = autocrop.download_and_cache_models(os.path.join(models_path, "opencv")) dnn_model_path = autocrop.download_and_cache_models(os.path.join(paths.models_path, "opencv"))
except Exception as e: except Exception as e:
print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e) print("Unable to load face detection model for auto crop selection. Falling back to lower quality haar method.", e)

View File

@ -112,6 +112,7 @@ class EmbeddingDatabase:
self.skipped_embeddings = {} self.skipped_embeddings = {}
self.expected_shape = -1 self.expected_shape = -1
self.embedding_dirs = {} self.embedding_dirs = {}
self.previously_displayed_embeddings = ()
def add_embedding_dir(self, path): def add_embedding_dir(self, path):
self.embedding_dirs[path] = DirWithTextualInversionEmbeddings(path) self.embedding_dirs[path] = DirWithTextualInversionEmbeddings(path)
@ -151,7 +152,11 @@ class EmbeddingDatabase:
name = data.get('name', name) name = data.get('name', name)
else: else:
data = extract_image_data_embed(embed_image) data = extract_image_data_embed(embed_image)
name = data.get('name', name) if data:
name = data.get('name', name)
else:
# if data is None, this is not an embedding, just a preview image
return
elif ext in ['.BIN', '.PT']: elif ext in ['.BIN', '.PT']:
data = torch.load(path, map_location="cpu") data = torch.load(path, map_location="cpu")
elif ext in ['.SAFETENSORS']: elif ext in ['.SAFETENSORS']:
@ -194,7 +199,7 @@ class EmbeddingDatabase:
if not os.path.isdir(embdir.path): if not os.path.isdir(embdir.path):
return return
for root, dirs, fns in os.walk(embdir.path): for root, dirs, fns in os.walk(embdir.path, followlinks=True):
for fn in fns: for fn in fns:
try: try:
fullfn = os.path.join(root, fn) fullfn = os.path.join(root, fn)
@ -228,9 +233,12 @@ class EmbeddingDatabase:
self.load_from_dir(embdir) self.load_from_dir(embdir)
embdir.update() embdir.update()
print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}") displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys()))
if len(self.skipped_embeddings) > 0: if self.previously_displayed_embeddings != displayed_embeddings:
print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}") self.previously_displayed_embeddings = displayed_embeddings
print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}")
if len(self.skipped_embeddings) > 0:
print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}")
def find_embedding_at_position(self, tokens, offset): def find_embedding_at_position(self, tokens, offset):
token = tokens[offset] token = tokens[offset]
@ -347,7 +355,7 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat
assert log_directory, "Log directory is empty" assert log_directory, "Log directory is empty"
def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_embedding_every, template_filename, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
save_embedding_every = save_embedding_every or 0 save_embedding_every = save_embedding_every or 0
create_image_every = create_image_every or 0 create_image_every = create_image_every or 0
template_file = textual_inversion_templates.get(template_filename, None) template_file = textual_inversion_templates.get(template_filename, None)
@ -406,7 +414,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
pin_memory = shared.opts.pin_memory pin_memory = shared.opts.pin_memory
ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize) ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
if shared.opts.save_training_settings_to_txt: if shared.opts.save_training_settings_to_txt:
save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()}) save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})
@ -476,6 +484,8 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
with devices.autocast(): with devices.autocast():
x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
if use_weight:
w = batch.weight.to(devices.device, non_blocking=pin_memory)
c = shared.sd_model.cond_stage_model(batch.cond_text) c = shared.sd_model.cond_stage_model(batch.cond_text)
if is_training_inpainting_model: if is_training_inpainting_model:
@ -486,7 +496,11 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
else: else:
cond = c cond = c
loss = shared.sd_model(x, cond)[0] / gradient_step if use_weight:
loss = shared.sd_model.weighted_forward(x, cond, w)[0] / gradient_step
del w
else:
loss = shared.sd_model.forward(x, cond)[0] / gradient_step
del x del x
_loss_step += loss.item() _loss_step += loss.item()
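Conceptually, the weighted path above scales the per-element reconstruction error by the weight map (normalized to mean 1) before averaging, so low-alpha regions contribute less to the gradient. A minimal sketch of that idea; this is not the actual weighted_forward implementation, which is defined elsewhere in the webui code.
import torch

def weighted_mse(pred, target, weight):
    # with an all-ones weight map this reduces to the ordinary mean-squared error
    return (weight * (pred - target) ** 2).mean()

pred = torch.rand(1, 4, 64, 64)
target = torch.rand(1, 4, 64, 64)
weight = torch.ones(1, 4, 64, 64)
assert torch.isclose(weighted_mse(pred, target, weight), ((pred - target) ** 2).mean())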

38
modules/timer.py Normal file
View File

@ -0,0 +1,38 @@
import time
class Timer:
def __init__(self):
self.start = time.time()
self.records = {}
self.total = 0
def elapsed(self):
end = time.time()
res = end - self.start
self.start = end
return res
def record(self, category, extra_time=0):
e = self.elapsed()
if category not in self.records:
self.records[category] = 0
self.records[category] += e + extra_time
self.total += e + extra_time
def summary(self):
res = f"{self.total:.1f}s"
additions = [x for x in self.records.items() if x[1] >= 0.1]
if not additions:
return res
res += " ("
res += ", ".join([f"{category}: {time_taken:.1f}s" for category, time_taken in additions])
res += ")"
return res
def reset(self):
self.__init__()
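A short usage sketch for the new Timer class; the category names and sleeps are illustrative only.
import time
from modules.timer import Timer

timer = Timer()
time.sleep(0.2)
timer.record("load model")   # illustrative category
time.sleep(0.1)
timer.record("build ui")
print(timer.summary())       # e.g. "0.3s (load model: 0.2s, build ui: 0.1s)"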

View File

@ -1,5 +1,6 @@
import modules.scripts import modules.scripts
from modules import sd_samplers from modules import sd_samplers
from modules.generation_parameters_copypaste import create_override_settings_dict
from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \ from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \
StableDiffusionProcessingImg2Img, process_images StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, cmd_opts from modules.shared import opts, cmd_opts
@ -8,7 +9,9 @@ import modules.processing as processing
from modules.ui import plaintext_to_html from modules.ui import plaintext_to_html
def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, *args): def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args):
override_settings = create_override_settings_dict(override_settings_texts)
p = StableDiffusionProcessingTxt2Img( p = StableDiffusionProcessingTxt2Img(
sd_model=shared.sd_model, sd_model=shared.sd_model,
outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
@ -38,6 +41,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
hr_second_pass_steps=hr_second_pass_steps, hr_second_pass_steps=hr_second_pass_steps,
hr_resize_x=hr_resize_x, hr_resize_x=hr_resize_x,
hr_resize_y=hr_resize_y, hr_resize_y=hr_resize_y,
override_settings=override_settings,
) )
p.scripts = modules.scripts.scripts_txt2img p.scripts = modules.scripts.scripts_txt2img
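For context, override_settings_texts comes from the new "Override settings" dropdown and is turned into a dict of setting overrides before being passed to the processing object. A hypothetical sketch of that mapping, assuming entries of the form "Label: value"; this is not the actual create_override_settings_dict implementation.
# hypothetical sketch: map dropdown entries to setting overrides using opts.cast_value
def parse_override_texts(texts, label_to_setting, opts):
    overrides = {}
    for text in texts:
        label, _, value = text.partition(":")
        key = label_to_setting[label.strip()]          # e.g. "Clip skip" -> "CLIP_stop_at_last_layers"
        overrides[key] = opts.cast_value(key, value.strip())
    return overrides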

View File

@ -20,8 +20,8 @@ from PIL import Image, PngImagePlugin
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML
from modules.paths import script_path from modules.paths import script_path, data_path
from modules.shared import opts, cmd_opts, restricted_opts from modules.shared import opts, cmd_opts, restricted_opts
@ -70,17 +70,6 @@ def gr_show(visible=True):
sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg" sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
css_hide_progressbar = """
.wrap .m-12 svg { display:none!important; }
.wrap .m-12::before { content:"Loading..." }
.wrap .z-20 svg { display:none!important; }
.wrap .z-20::before { content:"Loading..." }
.wrap.cover-bg .z-20::before { content:"" }
.progress-bar { display:none!important; }
.meta-text { display:none!important; }
.meta-text-center { display:none!important; }
"""
# Using constants for these since the variation selector isn't visible. # Using constants for these since the variation selector isn't visible.
# Important that they exactly match script.js for tooltip to work. # Important that they exactly match script.js for tooltip to work.
random_symbol = '\U0001f3b2\ufe0f' # 🎲️ random_symbol = '\U0001f3b2\ufe0f' # 🎲️
@ -89,8 +78,9 @@ paste_symbol = '\u2199\ufe0f' # ↙
refresh_symbol = '\U0001f504' # 🔄 refresh_symbol = '\U0001f504' # 🔄
save_style_symbol = '\U0001f4be' # 💾 save_style_symbol = '\U0001f4be' # 💾
apply_style_symbol = '\U0001f4cb' # 📋 apply_style_symbol = '\U0001f4cb' # 📋
clear_prompt_symbol = '\U0001F5D1' # 🗑️ clear_prompt_symbol = '\U0001f5d1\ufe0f' # 🗑️
extra_networks_symbol = '\U0001F3B4' # 🎴 extra_networks_symbol = '\U0001F3B4' # 🎴
switch_values_symbol = '\U000021C5' # ⇅
def plaintext_to_html(text): def plaintext_to_html(text):
@ -178,14 +168,13 @@ def interrogate_deepbooru(image):
def create_seed_inputs(target_interface): def create_seed_inputs(target_interface):
with FormRow(elem_id=target_interface + '_seed_row'): with FormRow(elem_id=target_interface + '_seed_row', variant="compact"):
seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed') seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed')
seed.style(container=False) seed.style(container=False)
random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed') random_seed = ToolButton(random_symbol, elem_id=target_interface + '_random_seed')
reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed') reuse_seed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_seed')
with gr.Group(elem_id=target_interface + '_subseed_show_box'): seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False)
seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False)
# Components to show/hide based on the 'Extra' checkbox # Components to show/hide based on the 'Extra' checkbox
seed_extras = [] seed_extras = []
@ -194,8 +183,8 @@ def create_seed_inputs(target_interface):
seed_extras.append(seed_extra_row_1) seed_extras.append(seed_extra_row_1)
subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed') subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed')
subseed.style(container=False) subseed.style(container=False)
random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed') random_subseed = ToolButton(random_symbol, elem_id=target_interface + '_random_subseed')
reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed') reuse_subseed = ToolButton(reuse_symbol, elem_id=target_interface + '_reuse_subseed')
subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength') subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength')
with FormRow(visible=False) as seed_extra_row_2: with FormRow(visible=False) as seed_extra_row_2:
@ -290,19 +279,19 @@ def create_toprow(is_img2img):
with gr.Row(): with gr.Row():
with gr.Column(scale=80): with gr.Column(scale=80):
with gr.Row(): with gr.Row():
negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=2, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)") negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)")
button_interrogate = None button_interrogate = None
button_deepbooru = None button_deepbooru = None
if is_img2img: if is_img2img:
with gr.Column(scale=1, elem_id="interrogate_col"): with gr.Column(scale=1, elem_classes="interrogate-col"):
button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"): with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"):
with gr.Row(elem_id=f"{id_part}_generate_box"): with gr.Row(elem_id=f"{id_part}_generate_box", elem_classes="generate-box"):
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt") interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt", elem_classes="generate-box-interrupt")
skip = gr.Button('Skip', elem_id=f"{id_part}_skip") skip = gr.Button('Skip', elem_id=f"{id_part}_skip", elem_classes="generate-box-skip")
submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
skip.click( skip.click(
@ -324,9 +313,9 @@ def create_toprow(is_img2img):
prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply") prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply")
save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create") save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create")
token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter") token_counter = gr.HTML(value="<span>0/75</span>", elem_id=f"{id_part}_token_counter", elem_classes=["token-counter"])
token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
negative_token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_negative_token_counter") negative_token_counter = gr.HTML(value="<span>0/75</span>", elem_id=f"{id_part}_negative_token_counter", elem_classes=["token-counter"])
negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button") negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button")
clear_prompt_button.click( clear_prompt_button.click(
@ -379,6 +368,7 @@ def apply_setting(key, value):
opts.save(shared.config_filename) opts.save(shared.config_filename)
return getattr(opts, key) return getattr(opts, key)
def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id): def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
def refresh(): def refresh():
refresh_method() refresh_method()
@ -432,6 +422,18 @@ def get_value_for_setting(key):
return gr.update(value=value, **args) return gr.update(value=value, **args)
def create_override_settings_dropdown(tabname, row):
dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True)
dropdown.change(
fn=lambda x: gr.Dropdown.update(visible=len(x) > 0),
inputs=[dropdown],
outputs=[dropdown],
)
return dropdown
def create_ui(): def create_ui():
import modules.img2img import modules.img2img
import modules.txt2img import modules.txt2img
@@ -465,6 +467,9 @@ def create_ui():
width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")

+ with gr.Column(elem_id="txt2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
+     res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn")

if opts.dimensions_and_batch_together:
with gr.Column(elem_id="txt2img_column_batch"):
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
@@ -477,7 +482,7 @@ def create_ui():
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img')

elif category == "checkboxes":
- with FormRow(elem_id="txt2img_checkboxes", variant="compact"):
+ with FormRow(elem_classes="checkboxes-row", variant="compact"):
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
@@ -501,6 +506,10 @@ def create_ui():
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")

+ elif category == "override_settings":
+     with FormRow(elem_id="txt2img_override_settings_row") as row:
+         override_settings = create_override_settings_dropdown('txt2img', row)

elif category == "scripts":
with FormGroup(elem_id="txt2img_script_container"):
custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
@@ -522,7 +531,6 @@ def create_ui():
)

txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
- parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt)

connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
@@ -553,6 +561,7 @@ def create_ui():
hr_second_pass_steps,
hr_resize_x,
hr_resize_y,
+ override_settings,
] + custom_inputs,

outputs=[
@@ -567,6 +576,8 @@ def create_ui():
txt2img_prompt.submit(**txt2img_args)
submit.click(**txt2img_args)

+ res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False)

txt_prompt_img.change(
fn=modules.images.image_data,
inputs=[
@@ -610,7 +621,10 @@ def create_ui():
(hr_resize_y, "Hires resize-2"),
*modules.scripts.scripts_txt2img.infotext_fields
]
- parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields)
+ parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings)
+ parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+     paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None,
+ ))

txt2img_preview_params = [
txt2img_prompt,
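As the hunk above shows, the old bind_buttons call gives way to register_paste_params_button plus a ParamBinding record; the actual wiring happens once, later, in connect_paste_params_buttons (see the quicksettings hunk further down). A rough, hedged sketch of how an extension tab might register its own paste button under this API, using only the ParamBinding fields that appear in this diff (everything else here is hypothetical):

```python
# Hypothetical extension code, not part of this changeset.
import gradio as gr
from modules import generation_parameters_copypaste as parameters_copypaste

def add_my_tab_paste_button(my_prompt_textbox):
    paste = gr.Button("Paste", elem_id="mytab_paste")
    parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
        paste_button=paste,
        tabname="txt2img",                        # which tab's paste fields to fill
        source_text_component=my_prompt_textbox,  # where the infotext is read from
        source_image_component=None,
    ))
    return paste
```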
@@ -691,9 +705,15 @@ def create_ui():
with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
- gr.HTML(f"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>")
+ gr.HTML(
+     f"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
+     f"<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+     f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
+     f"{hidden}</p>"
+ )
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
+ img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")

def copy_image(img):
if isinstance(img, dict) and 'image' in img:
@@ -727,6 +747,9 @@ def create_ui():
width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")

+ with gr.Column(elem_id="img2img_dimensions_row", scale=1, elem_classes="dimensions-tools"):
+     res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")

if opts.dimensions_and_batch_together:
with gr.Column(elem_id="img2img_column_batch"):
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
@@ -734,14 +757,16 @@ def create_ui():
elif category == "cfg":
with FormGroup():
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
+ with FormRow():
+     cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
+     image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")

elif category == "seed":
seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')

elif category == "checkboxes":
- with FormRow(elem_id="img2img_checkboxes", variant="compact"):
+ with FormRow(elem_classes="checkboxes-row", variant="compact"):
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
@@ -751,6 +776,10 @@ def create_ui():
batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")

+ elif category == "override_settings":
+     with FormRow(elem_id="img2img_override_settings_row") as row:
+         override_settings = create_override_settings_dropdown('img2img', row)

elif category == "scripts":
with FormGroup(elem_id="img2img_script_container"):
custom_inputs = modules.scripts.scripts_img2img.setup_ui()
@@ -785,7 +814,6 @@ def create_ui():
)

img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
- parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt)

connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
@@ -827,6 +855,7 @@ def create_ui():
batch_count,
batch_size,
cfg_scale,
+ image_cfg_scale,
denoising_strength,
seed,
subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
@@ -838,6 +867,8 @@ def create_ui():
inpainting_mask_invert,
img2img_batch_input_dir,
img2img_batch_output_dir,
+ img2img_batch_inpaint_mask_dir,
+ override_settings,
] + custom_inputs,
outputs=[
img2img_gallery,
@@ -865,6 +896,7 @@ def create_ui():
img2img_prompt.submit(**img2img_args)
submit.click(**img2img_args)

+ res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height], show_progress=False)

img2img_interrogate.click(
fn=lambda *args: process_interrogate(interrogate, *args),
@@ -899,7 +931,7 @@ def create_ui():
)

token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
- negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter])
+ negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter])

ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)
@@ -910,6 +942,7 @@ def create_ui():
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(cfg_scale, "CFG scale"),
+ (image_cfg_scale, "Image CFG scale"),
(seed, "Seed"),
(width, "Size-1"),
(height, "Size-2"),
@@ -922,8 +955,11 @@ def create_ui():
(mask_blur, "Mask blur"),
*modules.scripts.scripts_img2img.infotext_fields
]
- parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields)
- parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields)
+ parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings)
+ parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings)
+ parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+     paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None,
+ ))

modules.scripts.scripts_current = None
@@ -941,7 +977,11 @@ def create_ui():
html2 = gr.HTML()
with gr.Row():
buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
- parameters_copypaste.bind_buttons(buttons, image, generation_info)
+ for tabname, button in buttons.items():
+     parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+         paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image,
+     ))

image.change(
fn=wrap_gradio_call(modules.extras.run_pnginfo),
@@ -1143,6 +1183,8 @@ def create_ui():
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")

+ use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")

save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
@@ -1256,6 +1298,7 @@ def create_ui():
shuffle_tags,
tag_drop_out,
latent_sampling_method,
+ use_weight,
create_image_every,
save_embedding_every,
template_file,

@@ -1289,6 +1332,7 @@ def create_ui():
shuffle_tags,
tag_drop_out,
latent_sampling_method,
+ use_weight,
create_image_every,
save_embedding_every,
template_file,
@@ -1350,6 +1394,7 @@ def create_ui():
components = []
component_dict = {}
+ shared.settings_components = component_dict

script_callbacks.ui_settings_callback()
opts.reorder()
@@ -1438,11 +1483,33 @@ def create_ui():
request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
+ with gr.Row():
+     unload_sd_model = gr.Button(value='Unload SD checkpoint to free VRAM', elem_id="sett_unload_sd_model")
+     reload_sd_model = gr.Button(value='Reload the last SD checkpoint back into VRAM', elem_id="sett_reload_sd_model")

with gr.TabItem("Licenses"):
gr.HTML(shared.html("licenses.html"), elem_id="licenses")

gr.Button(value="Show all pages", elem_id="settings_show_all_pages")

+ def unload_sd_weights():
+     modules.sd_models.unload_model_weights()
+
+ def reload_sd_weights():
+     modules.sd_models.reload_model_weights()
+
+ unload_sd_model.click(
+     fn=unload_sd_weights,
+     inputs=[],
+     outputs=[]
+ )
+
+ reload_sd_model.click(
+     fn=reload_sd_weights,
+     inputs=[],
+     outputs=[]
+ )

request_notifications.click(
fn=lambda: None,
@@ -1488,39 +1555,28 @@ def create_ui():
(train_interface, "Train", "ti"),
]

- css = ""
-
- for cssfile in modules.scripts.list_files_with_name("style.css"):
-     if not os.path.isfile(cssfile):
-         continue
-
-     with open(cssfile, "r", encoding="utf8") as file:
-         css += file.read() + "\n"
-
- if os.path.exists(os.path.join(script_path, "user.css")):
-     with open(os.path.join(script_path, "user.css"), "r", encoding="utf8") as file:
-         css += file.read() + "\n"
-
- if not cmd_opts.no_progressbar_hiding:
-     css += css_hide_progressbar

interfaces += script_callbacks.ui_tabs_callback()
interfaces += [(settings_interface, "Settings", "settings")]

extensions_interface = ui_extensions.create_ui()
interfaces += [(extensions_interface, "Extensions", "extensions")]

- with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
+ shared.tab_names = []
+ for _interface, label, _ifid in interfaces:
+     shared.tab_names.append(label)
+
+ with gr.Blocks(analytics_enabled=False, title="Stable Diffusion") as demo:
with gr.Row(elem_id="quicksettings", variant="compact"):
for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
component = create_setting_component(k, is_quicksettings=True)
component_dict[k] = component

- parameters_copypaste.integrate_settings_paste_fields(component_dict)
- parameters_copypaste.run_bind()
+ parameters_copypaste.connect_paste_params_buttons()

with gr.Tabs(elem_id="tabs") as tabs:
for interface, label, ifid in interfaces:
+     if label in shared.opts.hidden_tabs:
+         continue
with gr.TabItem(label, id=ifid, elem_id='tab_' + ifid):
interface.render()
@@ -1540,13 +1596,29 @@ def create_ui():
for i, k, item in quicksettings_list:
component = component_dict[k]
+ info = opts.data_labels[k]

component.change(
fn=lambda value, k=k: run_settings_single(value, key=k),
inputs=[component],
outputs=[component, text_settings],
+ show_progress=info.refresh is not None,
)

+ text_settings.change(
+     fn=lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit"),
+     inputs=[],
+     outputs=[image_cfg_scale],
+ )
+
+ button_set_checkpoint = gr.Button('Change checkpoint', elem_id='change_checkpoint', visible=False)
+ button_set_checkpoint.click(
+     fn=lambda value, _: run_settings_single(value, key='sd_model_checkpoint'),
+     _js="function(v){ var res = desiredCheckpointName; desiredCheckpointName = ''; return [res || v, null]; }",
+     inputs=[component_dict['sd_model_checkpoint'], dummy_component],
+     outputs=[component_dict['sd_model_checkpoint'], text_settings],
+ )

component_keys = [k for k in opts.data_labels.keys() if k in component_dict]
def get_settings_values():

@@ -1556,6 +1628,7 @@ def create_ui():
fn=get_settings_values,
inputs=[],
outputs=[component_dict[k] for k in component_keys],
+ queue=False,
)

def modelmerger(*args):
@@ -1678,21 +1751,60 @@ def create_ui():
return demo

- def reload_javascript():
-     head = f'<script type="text/javascript" src="file={os.path.abspath("script.js")}"></script>\n'
+ def webpath(fn):
+     if fn.startswith(script_path):
+         web_path = os.path.relpath(fn, script_path).replace('\\', '/')
+     else:
+         web_path = os.path.abspath(fn)
+
+     return f'file={web_path}?{os.path.getmtime(fn)}'
+
+ def javascript_html():
+     script_js = os.path.join(script_path, "script.js")
+     head = f'<script type="text/javascript" src="{webpath(script_js)}"></script>\n'

inline = f"{localization.localization_js(shared.opts.localization)};"
if cmd_opts.theme is not None:
inline += f"set_theme('{cmd_opts.theme}');"

for script in modules.scripts.list_scripts("javascript", ".js"):
-     head += f'<script type="text/javascript" src="file={script.path}"></script>\n'
+     head += f'<script type="text/javascript" src="{webpath(script.path)}"></script>\n'
+
+ for script in modules.scripts.list_scripts("javascript", ".mjs"):
+     head += f'<script type="module" src="{webpath(script.path)}"></script>\n'

head += f'<script type="text/javascript">{inline}</script>\n'

+     return head
+
+ def css_html():
+     head = ""
+
+     def stylesheet(fn):
+         return f'<link rel="stylesheet" property="stylesheet" href="{webpath(fn)}">'
+
+     for cssfile in modules.scripts.list_files_with_name("style.css"):
+         if not os.path.isfile(cssfile):
+             continue
+
+         head += stylesheet(cssfile)
+
+     if os.path.exists(os.path.join(data_path, "user.css")):
+         head += stylesheet(os.path.join(data_path, "user.css"))
+
+     return head
+
+ def reload_javascript():
+     js = javascript_html()
+     css = css_html()

def template_response(*args, **kwargs):
res = shared.GradioTemplateResponseOriginal(*args, **kwargs)
- res.body = res.body.replace(b'</head>', f'{head}</head>'.encode("utf8"))
+ res.body = res.body.replace(b'</head>', f'{js}</head>'.encode("utf8"))
+ res.body = res.body.replace(b'</body>', f'{css}</body>'.encode("utf8"))
res.init_headers()
return res
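The rewritten loader above splits into webpath(), javascript_html() and css_html(): scripts and stylesheets are emitted as script/link tags whose URLs carry the file's mtime, so the browser cache is busted whenever a file changes. A minimal sketch of that cache-busting idea in isolation, with names kept local to the example rather than taken from the module:

```python
import os

def cache_busted_url(path, root):
    # Serve paths relative to the webui root when possible, and append the
    # file's mtime so browsers refetch after every edit (same idea as webpath()).
    if path.startswith(root):
        web_path = os.path.relpath(path, root).replace("\\", "/")
    else:
        web_path = os.path.abspath(path)
    return f"file={web_path}?{os.path.getmtime(path)}"

# e.g. cache_busted_url("/srv/webui/script.js", "/srv/webui")
#   -> 'file=script.js?1680000000.0'
```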
@@ -1720,7 +1832,7 @@ def versions_html():
return f"""
python: <span title="{sys.version}">{python_version}</span>
&#x2000;•&#x2000;
- torch: {torch.__version__}
+ torch: {getattr(torch, '__long_version__', torch.__version__)}
&#x2000;•&#x2000;
xformers: {xformers_version}
&#x2000;•&#x2000;

@@ -129,8 +129,8 @@ Requested path was: {f}
generation_info = None
with gr.Column():
- with gr.Row(elem_id=f"image_buttons_{tabname}"):
+ with gr.Row(elem_id=f"image_buttons_{tabname}", elem_classes="image-buttons"):
- open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else f'open_folder_{tabname}')
+ open_folder_button = gr.Button(folder_symbol, visible=not shared.cmd_opts.hide_ui_dir_config)

if tabname != "extras":
save = gr.Button('Save', elem_id=f'save_{tabname}')
@@ -145,11 +145,10 @@ Requested path was: {f}
)

if tabname != "extras":
- with gr.Row():
-     download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False, elem_id=f'download_files_{tabname}')
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False, elem_id=f'download_files_{tabname}')

with gr.Group():
- html_info = gr.HTML(elem_id=f'html_info_{tabname}')
+ html_info = gr.HTML(elem_id=f'html_info_{tabname}', elem_classes="infotext")
html_log = gr.HTML(elem_id=f'html_log_{tabname}')

generation_info = gr.Textbox(visible=False, elem_id=f'generation_info_{tabname}')
@@ -160,6 +159,7 @@ Requested path was: {f}
_js="function(x, y, z){ return [x, y, selected_gallery_index()] }",
inputs=[generation_info, html_info, html_info],
outputs=[html_info, html_info],
+ show_progress=False,
)

save.click(
@@ -195,8 +195,19 @@ Requested path was: {f}
else:
html_info_x = gr.HTML(elem_id=f'html_info_x_{tabname}')
- html_info = gr.HTML(elem_id=f'html_info_{tabname}')
+ html_info = gr.HTML(elem_id=f'html_info_{tabname}', elem_classes="infotext")
html_log = gr.HTML(elem_id=f'html_log_{tabname}')

- parameters_copypaste.bind_buttons(buttons, result_gallery, "txt2img" if tabname == "txt2img" else None)
+ paste_field_names = []
+ if tabname == "txt2img":
+     paste_field_names = modules.scripts.scripts_txt2img.paste_field_names
+ elif tabname == "img2img":
+     paste_field_names = modules.scripts.scripts_img2img.paste_field_names
+
+ for paste_tabname, paste_button in buttons.items():
+     parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
+         paste_button=paste_button, tabname=paste_tabname, source_tabname="txt2img" if tabname == "txt2img" else None, source_image_component=result_gallery,
+         paste_field_names=paste_field_names
+     ))

return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log

@@ -1,50 +1,64 @@
import gradio as gr

+ class FormComponent:
+     def get_expected_parent(self):
+         return gr.components.Form
+
+ gr.Dropdown.get_expected_parent = FormComponent.get_expected_parent

- class ToolButton(gr.Button, gr.components.FormComponent):
+ class ToolButton(FormComponent, gr.Button):
"""Small button with single emoji as text, fits inside gradio forms"""

- def __init__(self, **kwargs):
-     super().__init__(variant="tool", **kwargs)
+ def __init__(self, *args, **kwargs):
+     classes = kwargs.pop("elem_classes", [])
+     super().__init__(*args, elem_classes=["tool", *classes], **kwargs)

def get_block_name(self):
return "button"

- class ToolButtonTop(gr.Button, gr.components.FormComponent):
-     """Small button with single emoji as text, with extra margin at top, fits inside gradio forms"""
-
-     def __init__(self, **kwargs):
-         super().__init__(variant="tool-top", **kwargs)
-
-     def get_block_name(self):
-         return "button"

- class FormRow(gr.Row, gr.components.FormComponent):
+ class FormRow(FormComponent, gr.Row):
"""Same as gr.Row but fits inside gradio forms"""

def get_block_name(self):
return "row"

+ class FormColumn(FormComponent, gr.Column):
+     """Same as gr.Column but fits inside gradio forms"""
+
+     def get_block_name(self):
+         return "column"

- class FormGroup(gr.Group, gr.components.FormComponent):
+ class FormGroup(FormComponent, gr.Group):
"""Same as gr.Row but fits inside gradio forms"""

def get_block_name(self):
return "group"

- class FormHTML(gr.HTML, gr.components.FormComponent):
+ class FormHTML(FormComponent, gr.HTML):
"""Same as gr.HTML but fits inside gradio forms"""

def get_block_name(self):
return "html"

- class FormColorPicker(gr.ColorPicker, gr.components.FormComponent):
+ class FormColorPicker(FormComponent, gr.ColorPicker):
"""Same as gr.ColorPicker but fits inside gradio forms"""

def get_block_name(self):
return "colorpicker"

+ class DropdownMulti(FormComponent, gr.Dropdown):
+     """Same as gr.Dropdown but always multiselect"""
+     def __init__(self, **kwargs):
+         super().__init__(multiselect=True, **kwargs)
+
+     def get_block_name(self):
+         return "dropdown"

@@ -1,6 +1,5 @@
import json
import os.path
- import shutil
import sys
import time
import traceback

@@ -13,7 +12,7 @@ import shutil
import errno

from modules import extensions, shared, paths
+ from modules.call_queue import wrap_gradio_gpu_call

available_extensions = {"extensions": []}

@@ -22,7 +21,7 @@ def check_access():
assert not shared.cmd_opts.disable_extension_access, "extension access disabled because of command line flags"

- def apply_and_restart(disable_list, update_list):
+ def apply_and_restart(disable_list, update_list, disable_all):
check_access()

disabled = json.loads(disable_list)
@@ -44,26 +43,37 @@ def apply_and_restart(disable_list, update_list):
print(traceback.format_exc(), file=sys.stderr)

shared.opts.disabled_extensions = disabled
+ shared.opts.disable_all_extensions = disable_all
shared.opts.save(shared.config_filename)

shared.state.interrupt()
shared.state.need_restart = True

- def check_updates():
+ def check_updates(id_task, disable_list):
check_access()

- for ext in extensions.extensions:
-     if ext.remote is None:
-         continue
+ disabled = json.loads(disable_list)
+ assert type(disabled) == list, f"wrong disable_list data for apply_and_restart: {disable_list}"
+
+ exts = [ext for ext in extensions.extensions if ext.remote is not None and ext.name not in disabled]
+ shared.state.job_count = len(exts)
+
+ for ext in exts:
+     shared.state.textinfo = ext.name

try:
ext.check_updates()
+ except FileNotFoundError as e:
+     if 'FETCH_HEAD' not in str(e):
+         raise
except Exception:
print(f"Error checking updates for {ext.name}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)

- return extension_table()
+ shared.state.nextjob()
+
+ return extension_table(), ""

def extension_table():
@@ -73,6 +83,7 @@ def extension_table():
<tr>
<th><abbr title="Use checkbox to enable the extension; it will be enabled or disabled when you click apply button">Extension</abbr></th>
<th>URL</th>
+ <th><abbr title="Extension version">Version</abbr></th>
<th><abbr title="Use checkbox to mark the extension for update; it will be updated when you click apply button">Update</abbr></th>
</tr>
</thead>
@@ -80,21 +91,24 @@ def extension_table():
"""

for ext in extensions.extensions:
- remote = ""
- if ext.is_builtin:
-     remote = "built-in"
- elif ext.remote:
-     remote = f"""<a href="{html.escape(ext.remote or '')}" target="_blank">{html.escape("built-in" if ext.is_builtin else ext.remote or '')}</a>"""
+ ext.read_info_from_repo()
+
+ remote = f"""<a href="{html.escape(ext.remote or '')}" target="_blank">{html.escape("built-in" if ext.is_builtin else ext.remote or '')}</a>"""

if ext.can_update:
ext_status = f"""<label><input class="gr-check-radio gr-checkbox" name="update_{html.escape(ext.name)}" checked="checked" type="checkbox">{html.escape(ext.status)}</label>"""
else:
ext_status = ext.status

+ style = ""
+ if shared.opts.disable_all_extensions == "extra" and not ext.is_builtin or shared.opts.disable_all_extensions == "all":
+     style = ' style="color: var(--primary-400)"'
+
code += f"""
<tr>
- <td><label><input class="gr-check-radio gr-checkbox" name="enable_{html.escape(ext.name)}" type="checkbox" {'checked="checked"' if ext.enabled else ''}>{html.escape(ext.name)}</label></td>
+ <td><label{style}><input class="gr-check-radio gr-checkbox" name="enable_{html.escape(ext.name)}" type="checkbox" {'checked="checked"' if ext.enabled else ''}>{html.escape(ext.name)}</label></td>
<td>{remote}</td>
+ <td>{ext.version}</td>
<td{' class="extension_status"' if ext.remote is not None else ''}>{ext_status}</td>
</tr>
"""
@@ -132,26 +146,24 @@ def install_extension_from_url(dirname, url):
normalized_url = normalize_git_url(url)
assert len([x for x in extensions.extensions if normalize_git_url(x.remote) == normalized_url]) == 0, 'Extension with this URL is already installed'

- tmpdir = os.path.join(paths.script_path, "tmp", dirname)
+ tmpdir = os.path.join(paths.data_path, "tmp", dirname)

try:
shutil.rmtree(tmpdir, True)
- repo = git.Repo.clone_from(url, tmpdir)
- repo.remote().fetch()
+ with git.Repo.clone_from(url, tmpdir) as repo:
+     repo.remote().fetch()
+     for submodule in repo.submodules:
+         submodule.update()

try:
os.rename(tmpdir, target_dir)
except OSError as err:
- # TODO what does this do on windows? I think it'll be a different error code but I don't have a system to check it
- # Shouldn't cause any new issues at least but we probably want to handle it there too.
if err.errno == errno.EXDEV:
# Cross device link, typical in docker or when tmp/ and extensions/ are on different file systems
# Since we can't use a rename, do the slower but more versitile shutil.move()
shutil.move(tmpdir, target_dir)
else:
# Something else, not enough free space, permissions, etc. rethrow it so that it gets handled.
- raise(err)
+ raise err

import launch
launch.run_extension_installer(target_dir)
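The install path keeps its cross-device fallback: os.rename cannot move a directory between filesystems (common in Docker, or when tmp/ and extensions/ live on different mounts), so an EXDEV error falls back to shutil.move. The pattern in isolation, roughly:

```python
import errno
import os
import shutil

def move_dir(src, dst):
    # Fast path: rename works when src and dst are on the same filesystem.
    try:
        os.rename(src, dst)
    except OSError as err:
        if err.errno == errno.EXDEV:
            # Cross-device link: fall back to the slower copy-and-delete move.
            shutil.move(src, dst)
        else:
            raise
```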
@@ -162,12 +174,12 @@ def install_extension_from_url(dirname, url):
shutil.rmtree(tmpdir, True)

- def install_extension_from_index(url, hide_tags, sort_column):
+ def install_extension_from_index(url, hide_tags, sort_column, filter_text):
ext_table, message = install_extension_from_url(None, url)

- code, _ = refresh_available_extensions_from_data(hide_tags, sort_column)
+ code, _ = refresh_available_extensions_from_data(hide_tags, sort_column, filter_text)

- return code, ext_table, message
+ return code, ext_table, message, ''

def refresh_available_extensions(url, hide_tags, sort_column):

@@ -181,11 +193,17 @@ def refresh_available_extensions(url, hide_tags, sort_column):
code, tags = refresh_available_extensions_from_data(hide_tags, sort_column)

- return url, code, gr.CheckboxGroup.update(choices=tags), ''
+ return url, code, gr.CheckboxGroup.update(choices=tags), '', ''

- def refresh_available_extensions_for_tags(hide_tags, sort_column):
-     code, _ = refresh_available_extensions_from_data(hide_tags, sort_column)
+ def refresh_available_extensions_for_tags(hide_tags, sort_column, filter_text):
+     code, _ = refresh_available_extensions_from_data(hide_tags, sort_column, filter_text)

return code, ''

+ def search_extensions(filter_text, hide_tags, sort_column):
+     code, _ = refresh_available_extensions_from_data(hide_tags, sort_column, filter_text)
+
+     return code, ''

@@ -200,7 +218,7 @@ sort_ordering = [
]

- def refresh_available_extensions_from_data(hide_tags, sort_column):
+ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=""):
extlist = available_extensions["extensions"]
installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions}
@@ -239,7 +257,12 @@ def refresh_available_extensions_from_data(hide_tags, sort_column):
hidden += 1
continue

+ if filter_text and filter_text.strip():
+     if filter_text.lower() not in html.escape(name).lower() and filter_text.lower() not in html.escape(description).lower():
+         hidden += 1
+         continue
+
- install_code = f"""<input onclick="install_extension_from_index(this, '{html.escape(url)}')" type="button" value="{"Install" if not existing else "Installed"}" {"disabled=disabled" if existing else ""} class="gr-button gr-button-lg gr-button-secondary">"""
+ install_code = f"""<button onclick="install_extension_from_index(this, '{html.escape(url)}')" {"disabled=disabled" if existing else ""} class="lg secondary gradio-button custom-button">{"Install" if not existing else "Installed"}</button>"""

tags_text = ", ".join([f"<span class='extension-tag' title='{tags.get(x, '')}'>{x}</span>" for x in extension_tags])
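The new filter_text parameter is a plain case-insensitive substring match against the escaped name and description, applied while the available-extensions table is being built. The same check factored out on its own, as a rough sketch:

```python
import html

def matches_filter(name, description, filter_text):
    # Case-insensitive substring match against the HTML-escaped fields,
    # mirroring the condition added in refresh_available_extensions_from_data.
    if not filter_text or not filter_text.strip():
        return True
    needle = filter_text.lower()
    return needle in html.escape(name).lower() or needle in html.escape(description).lower()
```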
@@ -273,32 +296,41 @@ def create_ui():
with gr.Tabs(elem_id="tabs_extensions") as tabs:
with gr.TabItem("Installed"):

- with gr.Row():
+ with gr.Row(elem_id="extensions_installed_top"):
apply = gr.Button(value="Apply and restart UI", variant="primary")
check = gr.Button(value="Check for updates")
+ extensions_disable_all = gr.Radio(label="Disable all extensions", choices=["none", "extra", "all"], value=shared.opts.disable_all_extensions, elem_id="extensions_disable_all")
extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False).style(container=False)
extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False).style(container=False)

+ html = ""
+ if shared.opts.disable_all_extensions != "none":
+     html = """
+ <span style="color: var(--primary-400);">
+     "Disable all extensions" was set, change it to "none" to load all extensions again
+ </span>
+     """
+ info = gr.HTML(html)
extensions_table = gr.HTML(lambda: extension_table())

apply.click(
fn=apply_and_restart,
_js="extensions_apply",
- inputs=[extensions_disabled_list, extensions_update_list],
+ inputs=[extensions_disabled_list, extensions_update_list, extensions_disable_all],
outputs=[],
)

check.click(
- fn=check_updates,
+ fn=wrap_gradio_gpu_call(check_updates, extra_outputs=[gr.update()]),
_js="extensions_check",
- inputs=[],
+ inputs=[info, extensions_disabled_list],
- outputs=[extensions_table],
+ outputs=[extensions_table, info],
)

with gr.TabItem("Available"):
with gr.Row():
refresh_available_extensions_button = gr.Button(value="Load from:", variant="primary")
- available_extensions_index = gr.Text(value="https://raw.githubusercontent.com/wiki/AUTOMATIC1111/stable-diffusion-webui/Extensions-index.md", label="Extension index URL").style(container=False)
+ available_extensions_index = gr.Text(value="https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui-extensions/master/index.json", label="Extension index URL").style(container=False)
extension_to_install = gr.Text(elem_id="extension_to_install", visible=False)
install_extension_button = gr.Button(elem_id="install_extension_button", visible=False)
@@ -306,30 +338,39 @@ def create_ui():
hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"])
sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order", ], type="index")

+ with gr.Row():
+     search_extensions_text = gr.Text(label="Search").style(container=False)

install_result = gr.HTML()
available_extensions_table = gr.HTML()

refresh_available_extensions_button.click(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update(), gr.update()]),
inputs=[available_extensions_index, hide_tags, sort_column],
- outputs=[available_extensions_index, available_extensions_table, hide_tags, install_result],
+ outputs=[available_extensions_index, available_extensions_table, hide_tags, install_result, search_extensions_text],
)

install_extension_button.click(
fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]),
- inputs=[extension_to_install, hide_tags, sort_column],
+ inputs=[extension_to_install, hide_tags, sort_column, search_extensions_text],
outputs=[available_extensions_table, extensions_table, install_result],
)

+ search_extensions_text.change(
+     fn=modules.ui.wrap_gradio_call(search_extensions, extra_outputs=[gr.update()]),
+     inputs=[search_extensions_text, hide_tags, sort_column],
+     outputs=[available_extensions_table, install_result],
+ )

hide_tags.change(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]),
- inputs=[hide_tags, sort_column],
+ inputs=[hide_tags, sort_column, search_extensions_text],
outputs=[available_extensions_table, install_result]
)

sort_column.change(
fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]),
- inputs=[hide_tags, sort_column],
+ inputs=[hide_tags, sort_column, search_extensions_text],
outputs=[available_extensions_table, install_result]
)

@@ -1,6 +1,11 @@
+ import glob
import os.path
+ import urllib.parse
+ from pathlib import Path
+ from PIL import PngImagePlugin

from modules import shared
+ from modules.images import read_info_from_image
import gradio as gr
import json
import html
@@ -8,12 +13,48 @@ import html
from modules.generation_parameters_copypaste import image_from_url_text

extra_pages = []
+ allowed_dirs = set()

def register_page(page):
"""registers extra networks page for the UI; recommend doing it in on_before_ui() callback for extensions"""

extra_pages.append(page)
+ allowed_dirs.clear()
+ allowed_dirs.update(set(sum([x.allowed_directories_for_previews() for x in extra_pages], [])))
def fetch_file(filename: str = ""):
from starlette.responses import FileResponse
if not any([Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs]):
raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")
ext = os.path.splitext(filename)[1].lower()
if ext not in (".png", ".jpg", ".webp"):
raise ValueError(f"File cannot be fetched: {filename}. Only png and jpg and webp.")
# would profit from returning 304
return FileResponse(filename, headers={"Accept-Ranges": "bytes"})
def get_metadata(page: str = "", item: str = ""):
from starlette.responses import JSONResponse
page = next(iter([x for x in extra_pages if x.name == page]), None)
if page is None:
return JSONResponse({})
metadata = page.metadata.get(item)
if metadata is None:
return JSONResponse({})
return JSONResponse({"metadata": metadata})
def add_pages_to_demo(app):
app.add_api_route("/sd_extra_networks/thumb", fetch_file, methods=["GET"])
app.add_api_route("/sd_extra_networks/metadata", get_metadata, methods=["GET"])
class ExtraNetworksPage:

@@ -22,23 +63,73 @@ class ExtraNetworksPage:
self.name = title.lower()
self.card_page = shared.html("extra-networks-card.html")
self.allow_negative_prompt = False
+ self.metadata = {}

def refresh(self):
pass
def link_preview(self, filename):
return "./sd_extra_networks/thumb?filename=" + urllib.parse.quote(filename.replace('\\', '/')) + "&mtime=" + str(os.path.getmtime(filename))
def search_terms_from_path(self, filename, possible_directories=None):
abspath = os.path.abspath(filename)
for parentdir in (possible_directories if possible_directories is not None else self.allowed_directories_for_previews()):
parentdir = os.path.abspath(parentdir)
if abspath.startswith(parentdir):
return abspath[len(parentdir):].replace('\\', '/')
return ""
def create_html(self, tabname):
view = shared.opts.extra_networks_default_view
items_html = ''

+ self.metadata = {}
subdirs = {}
for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]:
for x in glob.glob(os.path.join(parentdir, '**/*'), recursive=True):
if not os.path.isdir(x):
continue
subdir = os.path.abspath(x)[len(parentdir):].replace("\\", "/")
while subdir.startswith("/"):
subdir = subdir[1:]
is_empty = len(os.listdir(x)) == 0
if not is_empty and not subdir.endswith("/"):
subdir = subdir + "/"
subdirs[subdir] = 1
if subdirs:
subdirs = {"": 1, **subdirs}
subdirs_html = "".join([f"""
<button class='lg secondary gradio-button custom-button{" search-all" if subdir=="" else ""}' onclick='extraNetworksSearchButton("{tabname}_extra_tabs", event)'>
{html.escape(subdir if subdir!="" else "all")}
</button>
""" for subdir in subdirs])
for item in self.list_items():
+     metadata = item.get("metadata")
+     if metadata:
+         self.metadata[item["name"]] = metadata
+
items_html += self.create_html_for_item(item, tabname)

if items_html == '':
dirs = "".join([f"<li>{x}</li>" for x in self.allowed_directories_for_previews()])
items_html = shared.html("extra-networks-no-cards.html").format(dirs=dirs)

+ self_name_id = self.name.replace(" ", "_")
+
res = f"""
- <div id='{tabname}_{self.name}_cards' class='extra-network-{view}'>
+ <div id='{tabname}_{self_name_id}_subdirs' class='extra-network-subdirs extra-network-subdirs-{view}'>
+ {subdirs_html}
+ </div>
+ <div id='{tabname}_{self_name_id}_cards' class='extra-network-{view}'>
{items_html}
</div>
"""
@@ -54,18 +145,62 @@ class ExtraNetworksPage:
def create_html_for_item(self, item, tabname):
preview = item.get("preview", None)

+ onclick = item.get("onclick", None)
+ if onclick is None:
+     onclick = '"' + html.escape(f"""return cardClicked({json.dumps(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"'
+
+ height = f"height: {shared.opts.extra_networks_card_height}px;" if shared.opts.extra_networks_card_height else ''
+ width = f"width: {shared.opts.extra_networks_card_width}px;" if shared.opts.extra_networks_card_width else ''
+ background_image = f"background-image: url(\"{html.escape(preview)}\");" if preview else ''
+ metadata_button = ""
+ metadata = item.get("metadata")
+ if metadata:
+     metadata_button = f"<div class='metadata-button' title='Show metadata' onclick='extraNetworksRequestMetadata(event, {json.dumps(self.name)}, {json.dumps(item['name'])})'></div>"
+
args = {
- "preview_html": "style='background-image: url(\"" + html.escape(preview) + "\")'" if preview else '',
- "prompt": item["prompt"],
+ "style": f"'{height}{width}{background_image}'",
+ "prompt": item.get("prompt", None),
"tabname": json.dumps(tabname),
"local_preview": json.dumps(item["local_preview"]),
"name": item["name"],
- "card_clicked": '"' + html.escape(f"""return cardClicked({json.dumps(tabname)}, {item["prompt"]}, {"true" if self.allow_negative_prompt else "false"})""") + '"',
+ "description": (item.get("description") or ""),
+ "card_clicked": onclick,
"save_card_preview": '"' + html.escape(f"""return saveCardPreview(event, {json.dumps(tabname)}, {json.dumps(item["local_preview"])})""") + '"',
+ "search_term": item.get("search_term", ""),
+ "metadata_button": metadata_button,
}

return self.card_page.format(**args)
def find_preview(self, path):
"""
Find a preview PNG for a given path (without extension) and call link_preview on it.
"""
preview_extensions = ["png", "jpg", "webp"]
if shared.opts.samples_format not in preview_extensions:
preview_extensions.append(shared.opts.samples_format)
potential_files = sum([[path + "." + ext, path + ".preview." + ext] for ext in preview_extensions], [])
for file in potential_files:
if os.path.isfile(file):
return self.link_preview(file)
return None
def find_description(self, path):
"""
Find and read a description file for a given path (without extension).
"""
for file in [f"{path}.txt", f"{path}.description.txt"]:
try:
with open(file, "r", encoding="utf-8", errors="replace") as f:
return f.read()
except OSError:
pass
return None
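find_preview and find_description (added above) look next to the model file: a preview may be path.ext or path.preview.ext for png/jpg/webp (plus the configured samples format), and a description lives in path.txt or path.description.txt. For example, the candidate list probed for a hypothetical model path:

```python
# Hypothetical example of the preview filenames probed for "models/Lora/detail"
path = "models/Lora/detail"
preview_extensions = ["png", "jpg", "webp"]
potential_files = sum([[f"{path}.{ext}", f"{path}.preview.{ext}"] for ext in preview_extensions], [])
# ['models/Lora/detail.png', 'models/Lora/detail.preview.png',
#  'models/Lora/detail.jpg', 'models/Lora/detail.preview.jpg',
#  'models/Lora/detail.webp', 'models/Lora/detail.preview.webp']
```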
def intialize(): def intialize():
extra_pages.clear() extra_pages.clear()
@ -107,18 +242,22 @@ def create_ui(container, button, tabname):
with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs: with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs:
for page in ui.stored_extra_pages: for page in ui.stored_extra_pages:
with gr.Tab(page.title): with gr.Tab(page.title):
page_elem = gr.HTML(page.create_html(ui.tabname)) page_elem = gr.HTML(page.create_html(ui.tabname))
ui.pages.append(page_elem) ui.pages.append(page_elem)
filter = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False) filter = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", placeholder="Search...", visible=False)
button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh") button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh")
button_close = gr.Button('Close', elem_id=tabname+"_extra_close")
ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False) ui.button_save_preview = gr.Button('Save preview', elem_id=tabname+"_save_preview", visible=False)
ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False) ui.preview_target_filename = gr.Textbox('Preview save filename', elem_id=tabname+"_preview_filename", visible=False)
button.click(fn=lambda: gr.update(visible=True), inputs=[], outputs=[container]) def toggle_visibility(is_visible):
button_close.click(fn=lambda: gr.update(visible=False), inputs=[], outputs=[container]) is_visible = not is_visible
return is_visible, gr.update(visible=is_visible), gr.update(variant=("secondary-down" if is_visible else "secondary"))
state_visible = gr.State(value=False)
button.click(fn=toggle_visibility, inputs=[state_visible], outputs=[state_visible, container, button])
def refresh(): def refresh():
res = [] res = []
@ -138,7 +277,7 @@ def path_is_parent(parent_path, child_path):
parent_path = os.path.abspath(parent_path) parent_path = os.path.abspath(parent_path)
child_path = os.path.abspath(child_path) child_path = os.path.abspath(child_path)
-return os.path.commonpath([parent_path]) == os.path.commonpath([parent_path, child_path])
+return child_path.startswith(parent_path)
def setup_ui(ui, gallery): def setup_ui(ui, gallery):
@ -153,6 +292,7 @@ def setup_ui(ui, gallery):
img_info = images[index if index >= 0 else 0] img_info = images[index if index >= 0 else 0]
image = image_from_url_text(img_info) image = image_from_url_text(img_info)
geninfo, items = read_info_from_image(image)
is_allowed = False is_allowed = False
for extra_page in ui.stored_extra_pages: for extra_page in ui.stored_extra_pages:
@ -162,13 +302,19 @@ def setup_ui(ui, gallery):
assert is_allowed, f'writing to {filename} is not allowed' assert is_allowed, f'writing to {filename} is not allowed'
-image.save(filename)
+if geninfo:
+    pnginfo_data = PngImagePlugin.PngInfo()
+    pnginfo_data.add_text('parameters', geninfo)
+    image.save(filename, pnginfo=pnginfo_data)
+else:
+    image.save(filename)
return [page.create_html(ui.tabname) for page in ui.stored_extra_pages] return [page.create_html(ui.tabname) for page in ui.stored_extra_pages]
ui.button_save_preview.click( ui.button_save_preview.click(
fn=save_preview, fn=save_preview,
_js="function(x, y, z){console.log(x, y, z); return [selected_gallery_index(), y, z]}", _js="function(x, y, z){return [selected_gallery_index(), y, z]}",
inputs=[ui.preview_target_filename, gallery, ui.preview_target_filename], inputs=[ui.preview_target_filename, gallery, ui.preview_target_filename],
outputs=[*ui.pages] outputs=[*ui.pages]
) )
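Note: the save_preview change above copies the source image's generation parameters into the preview file through a PNG text chunk, so saved previews keep their infotext. A minimal sketch of that mechanism with Pillow (the prompt string is a made-up example, not output from the webui):

```python
from PIL import Image, PngImagePlugin

# Stand-in for the infotext that read_info_from_image() would return.
geninfo = "a cat\nSteps: 20, Sampler: Euler a, CFG scale: 7"

image = Image.new("RGB", (64, 64))               # stand-in for the gallery image
pnginfo_data = PngImagePlugin.PngInfo()
pnginfo_data.add_text("parameters", geninfo)     # same chunk name the webui reads back
image.save("preview.png", pnginfo=pnginfo_data)

# The text chunk survives the round trip:
print(Image.open("preview.png").info.get("parameters"))
```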
@ -0,0 +1,31 @@
import html
import json
import os
from modules import shared, ui_extra_networks, sd_models
class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage):
def __init__(self):
super().__init__('Checkpoints')
def refresh(self):
shared.refresh_checkpoints()
def list_items(self):
checkpoint: sd_models.CheckpointInfo
for name, checkpoint in sd_models.checkpoints_list.items():
path, ext = os.path.splitext(checkpoint.filename)
yield {
"name": checkpoint.name_for_extra,
"filename": path,
"preview": self.find_preview(path),
"description": self.find_description(path),
"search_term": self.search_terms_from_path(checkpoint.filename) + " " + (checkpoint.sha256 or ""),
"onclick": '"' + html.escape(f"""return selectCheckpoint({json.dumps(name)})""") + '"',
"local_preview": f"{path}.{shared.opts.samples_format}",
}
def allowed_directories_for_previews(self):
return [v for v in [shared.cmd_opts.ckpt_dir, sd_models.model_path] if v is not None]
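The onclick value above is built by JSON-encoding the checkpoint name (so it becomes a valid JavaScript string literal) and then HTML-escaping the whole handler so it can sit inside a double-quoted HTML attribute. A quick illustration with a deliberately awkward, invented checkpoint name:

```python
import html
import json

name = 'sd-v1-5 "pruned"'   # made-up checkpoint name containing quotes
onclick = '"' + html.escape(f"""return selectCheckpoint({json.dumps(name)})""") + '"'
print(onclick)
# The inner quotes come out as &quot;, so the value cannot break out of the surrounding HTML attribute.
```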
@ -14,20 +14,15 @@ class ExtraNetworksPageHypernetworks(ui_extra_networks.ExtraNetworksPage):
def list_items(self): def list_items(self):
for name, path in shared.hypernetworks.items(): for name, path in shared.hypernetworks.items():
path, ext = os.path.splitext(path) path, ext = os.path.splitext(path)
previews = [path + ".png", path + ".preview.png"]
preview = None
for file in previews:
if os.path.isfile(file):
preview = "./file=" + file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(file))
break
yield { yield {
"name": name, "name": name,
"filename": path, "filename": path,
"preview": preview, "preview": self.find_preview(path),
"description": self.find_description(path),
"search_term": self.search_terms_from_path(path),
"prompt": json.dumps(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"), "prompt": json.dumps(f"<hypernet:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
"local_preview": path + ".png", "local_preview": f"{path}.preview.{shared.opts.samples_format}",
} }
def allowed_directories_for_previews(self): def allowed_directories_for_previews(self):
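The hypernetwork entries now delegate to the shared find_preview helper added earlier in this diff, which probes both plain and ".preview."-style names for every supported extension plus the configured samples format. A small standalone sketch of the candidate list it checks (the path is made up for illustration; "jpg" stands in for opts.samples_format):

```python
path = "models/hypernetworks/foo"
preview_extensions = ["png", "jpg", "webp"]

samples_format = "jpg"                     # would be appended if it were not already listed
if samples_format not in preview_extensions:
    preview_extensions.append(samples_format)

potential_files = sum([[path + "." + ext, path + ".preview." + ext] for ext in preview_extensions], [])
print(potential_files)
# ['models/hypernetworks/foo.png', 'models/hypernetworks/foo.preview.png',
#  'models/hypernetworks/foo.jpg', 'models/hypernetworks/foo.preview.jpg',
#  'models/hypernetworks/foo.webp', 'models/hypernetworks/foo.preview.webp']
```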
@ -1,7 +1,7 @@
import json import json
import os import os
-from modules import ui_extra_networks, sd_hijack
+from modules import ui_extra_networks, sd_hijack, shared
class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage): class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
@ -15,18 +15,14 @@ class ExtraNetworksPageTextualInversion(ui_extra_networks.ExtraNetworksPage):
def list_items(self): def list_items(self):
for embedding in sd_hijack.model_hijack.embedding_db.word_embeddings.values(): for embedding in sd_hijack.model_hijack.embedding_db.word_embeddings.values():
path, ext = os.path.splitext(embedding.filename) path, ext = os.path.splitext(embedding.filename)
preview_file = path + ".preview.png"
preview = None
if os.path.isfile(preview_file):
preview = "./file=" + preview_file.replace('\\', '/') + "?mtime=" + str(os.path.getmtime(preview_file))
yield { yield {
"name": embedding.name, "name": embedding.name,
"filename": embedding.filename, "filename": embedding.filename,
"preview": preview, "preview": self.find_preview(path),
"description": self.find_description(path),
"search_term": self.search_terms_from_path(embedding.filename),
"prompt": json.dumps(embedding.name), "prompt": json.dumps(embedding.name),
"local_preview": path + ".preview.png", "local_preview": f"{path}.preview.{shared.opts.samples_format}",
} }
def allowed_directories_for_previews(self): def allowed_directories_for_previews(self):
@ -11,7 +11,6 @@ from modules import modelloader, shared
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
NEAREST = (Image.Resampling.NEAREST if hasattr(Image, 'Resampling') else Image.NEAREST) NEAREST = (Image.Resampling.NEAREST if hasattr(Image, 'Resampling') else Image.NEAREST)
from modules.paths import models_path
class Upscaler: class Upscaler:
@ -39,7 +38,7 @@ class Upscaler:
self.mod_scale = None self.mod_scale = None
if self.model_path is None and self.name: if self.model_path is None and self.name:
-self.model_path = os.path.join(models_path, self.name)
+self.model_path = os.path.join(shared.models_path, self.name)
if self.model_path and create_dirs: if self.model_path and create_dirs:
os.makedirs(self.model_path, exist_ok=True) os.makedirs(self.model_path, exist_ok=True)
@ -143,4 +142,4 @@ class UpscalerNearest(Upscaler):
def __init__(self, dirname=None): def __init__(self, dirname=None):
super().__init__(False) super().__init__(False)
self.name = "Nearest" self.name = "Nearest"
self.scalers = [UpscalerData("Nearest", None, self)] self.scalers = [UpscalerData("Nearest", None, self)]
@ -4,7 +4,7 @@ basicsr
fonts fonts
font-roboto font-roboto
gfpgan gfpgan
-gradio==3.16.2
+gradio==3.23
invisible-watermark invisible-watermark
numpy numpy
omegaconf omegaconf
@ -16,7 +16,7 @@ pytorch_lightning==1.7.7
realesrgan realesrgan
scikit-image>=0.19 scikit-image>=0.19
timm==0.4.12 timm==0.4.12
-transformers==4.19.2
+transformers==4.25.1
torch torch
einops einops
jsonmerge jsonmerge
@ -30,3 +30,4 @@ GitPython
torchsde torchsde
safetensors safetensors
psutil psutil
rich
@ -1,15 +1,15 @@
blendmodes==2022 blendmodes==2022
-transformers==4.19.2
+transformers==4.25.1
accelerate==0.12.0 accelerate==0.12.0
basicsr==1.4.2 basicsr==1.4.2
gfpgan==1.3.8 gfpgan==1.3.8
-gradio==3.16.2
+gradio==3.23
numpy==1.23.3 numpy==1.23.3
Pillow==9.4.0 Pillow==9.4.0
realesrgan==0.3.0 realesrgan==0.3.0
torch torch
omegaconf==2.2.3 omegaconf==2.2.3
-pytorch_lightning==1.7.6
+pytorch_lightning==1.9.4
scikit-image==0.19.2 scikit-image==0.19.2
fonts fonts
font-roboto font-roboto
@ -23,7 +23,8 @@ torchdiffeq==0.2.3
kornia==0.6.7 kornia==0.6.7
lark==1.1.2 lark==1.1.2
inflection==0.5.1 inflection==0.5.1
-GitPython==3.1.27
+GitPython==3.1.30
torchsde==0.2.5 torchsde==0.2.5
-safetensors==0.2.7
+safetensors==0.3.0
httpcore<=0.15 httpcore<=0.15
fastapi==0.94.0
@ -1,7 +1,9 @@
function gradioApp() { function gradioApp() {
const elems = document.getElementsByTagName('gradio-app') const elems = document.getElementsByTagName('gradio-app')
-    const gradioShadowRoot = elems.length == 0 ? null : elems[0].shadowRoot
-    return !!gradioShadowRoot ? gradioShadowRoot : document;
+    const elem = elems.length == 0 ? document : elems[0]
+
+    if (elem !== document) elem.getElementById = function(id){ return document.getElementById(id) }
+    return elem.shadowRoot ? elem.shadowRoot : elem
} }
function get_uiCurrentTab() { function get_uiCurrentTab() {
@ -6,23 +6,21 @@ from tqdm import trange
import modules.scripts as scripts import modules.scripts as scripts
import gradio as gr import gradio as gr
-from modules import processing, shared, sd_samplers, prompt_parser
+from modules import processing, shared, sd_samplers, sd_samplers_common
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
import torch import torch
import k_diffusion as K import k_diffusion as K
from PIL import Image
from torch import autocast
from einops import rearrange, repeat
def find_noise_for_image(p, cond, uncond, cfg_scale, steps): def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
x = p.init_latent x = p.init_latent
s_in = x.new_ones([x.shape[0]]) s_in = x.new_ones([x.shape[0]])
-    dnw = K.external.CompVisDenoiser(shared.sd_model)
+    if shared.sd_model.parameterization == "v":
+        dnw = K.external.CompVisVDenoiser(shared.sd_model)
+        skip = 1
+    else:
+        dnw = K.external.CompVisDenoiser(shared.sd_model)
+        skip = 0
sigmas = dnw.get_sigmas(steps).flip(0) sigmas = dnw.get_sigmas(steps).flip(0)
shared.state.sampling_steps = steps shared.state.sampling_steps = steps
@ -37,7 +35,7 @@ def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
image_conditioning = torch.cat([p.image_conditioning] * 2) image_conditioning = torch.cat([p.image_conditioning] * 2)
cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]} cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}
-        c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
+        c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)[skip:]]
t = dnw.sigma_to_t(sigma_in) t = dnw.sigma_to_t(sigma_in)
eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in) eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
@ -50,7 +48,7 @@ def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
x = x + d * dt x = x + d * dt
-        sd_samplers.store_latent(x)
+        sd_samplers_common.store_latent(x)
# This shouldn't be necessary, but solved some VRAM issues # This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t, del x_in, sigma_in, cond_in, c_out, c_in, t,
@ -69,7 +67,12 @@ def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
x = p.init_latent x = p.init_latent
s_in = x.new_ones([x.shape[0]]) s_in = x.new_ones([x.shape[0]])
-    dnw = K.external.CompVisDenoiser(shared.sd_model)
+    if shared.sd_model.parameterization == "v":
+        dnw = K.external.CompVisVDenoiser(shared.sd_model)
+        skip = 1
+    else:
+        dnw = K.external.CompVisDenoiser(shared.sd_model)
+        skip = 0
sigmas = dnw.get_sigmas(steps).flip(0) sigmas = dnw.get_sigmas(steps).flip(0)
shared.state.sampling_steps = steps shared.state.sampling_steps = steps
@ -84,7 +87,7 @@ def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
image_conditioning = torch.cat([p.image_conditioning] * 2) image_conditioning = torch.cat([p.image_conditioning] * 2)
cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]} cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}
-        c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]
+        c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)[skip:]]
if i == 1: if i == 1:
t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2)) t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))
@ -104,7 +107,7 @@ def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
dt = sigmas[i] - sigmas[i - 1] dt = sigmas[i] - sigmas[i - 1]
x = x + d * dt x = x + d * dt
-        sd_samplers.store_latent(x)
+        sd_samplers_common.store_latent(x)
# This shouldn't be necessary, but solved some VRAM issues # This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t, del x_in, sigma_in, cond_in, c_out, c_in, t,
@ -125,7 +128,7 @@ class Script(scripts.Script):
def show(self, is_img2img): def show(self, is_img2img):
return is_img2img return is_img2img
def ui(self, is_img2img): def ui(self, is_img2img):
info = gr.Markdown(''' info = gr.Markdown('''
* `CFG Scale` should be 2 or lower. * `CFG Scale` should be 2 or lower.
''') ''')
@ -213,4 +216,3 @@ class Script(scripts.Script):
processed = processing.process_images(p) processed = processing.process_images(p)
return processed return processed
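The img2imgalt changes wrap v-prediction checkpoints with CompVisVDenoiser and then index get_scalings(...)[skip:]; the point is that the v wrapper returns one extra leading scaling term (c_skip in k-diffusion's wrappers), so dropping it lets the same two-value unpacking work for both parameterizations. A tiny generic sketch of that indexing trick (the tuple contents are placeholders, not real scalings):

```python
def unpack_last_two(scalings, skip):
    # Mirrors the `[skip:]` slice above: keep only (c_out, c_in), whether or not
    # the wrapper also returned a leading c_skip term.
    c_out, c_in = scalings[skip:]
    return c_out, c_in

print(unpack_last_two((0.5, 2.0), skip=0))        # eps-style wrapper: two scalings
print(unpack_last_two((1.0, 0.5, 2.0), skip=1))   # v-style wrapper: leading c_skip dropped
```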
@ -1,13 +1,10 @@
-import numpy as np
-from tqdm import trange
-import modules.scripts as scripts
+import math
import gradio as gr
+import modules.scripts as scripts
-from modules import processing, shared, sd_samplers, images
+from modules import deepbooru, images, processing, shared
from modules.processing import Processed
-from modules.sd_samplers import samplers
-from modules.shared import opts, cmd_opts, state
+from modules.shared import opts, state
class Script(scripts.Script): class Script(scripts.Script):
@ -19,37 +16,65 @@ class Script(scripts.Script):
def ui(self, is_img2img): def ui(self, is_img2img):
loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops")) loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops"))
-        denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=self.elem_id("denoising_strength_change_factor"))
+        final_denoising_strength = gr.Slider(minimum=0, maximum=1, step=0.01, label='Final denoising strength', value=0.5, elem_id=self.elem_id("final_denoising_strength"))
+        denoising_curve = gr.Dropdown(label="Denoising strength curve", choices=["Aggressive", "Linear", "Lazy"], value="Linear")
+        append_interrogation = gr.Dropdown(label="Append interrogated prompt at each iteration", choices=["None", "CLIP", "DeepBooru"], value="None")

-        return [loops, denoising_strength_change_factor]
+        return [loops, final_denoising_strength, denoising_curve, append_interrogation]

-    def run(self, p, loops, denoising_strength_change_factor):
+    def run(self, p, loops, final_denoising_strength, denoising_curve, append_interrogation):
processing.fix_seed(p) processing.fix_seed(p)
batch_count = p.n_iter batch_count = p.n_iter
p.extra_generation_params = { p.extra_generation_params = {
"Denoising strength change factor": denoising_strength_change_factor, "Final denoising strength": final_denoising_strength,
"Denoising curve": denoising_curve
} }
p.batch_size = 1 p.batch_size = 1
p.n_iter = 1 p.n_iter = 1
-        output_images, info = None, None
+        info = None
initial_seed = None initial_seed = None
initial_info = None initial_info = None
initial_denoising_strength = p.denoising_strength
grids = [] grids = []
all_images = [] all_images = []
original_init_image = p.init_images original_init_image = p.init_images
original_prompt = p.prompt
original_inpainting_fill = p.inpainting_fill
state.job_count = loops * batch_count state.job_count = loops * batch_count
initial_color_corrections = [processing.setup_color_correction(p.init_images[0])] initial_color_corrections = [processing.setup_color_correction(p.init_images[0])]
-        for n in range(batch_count):
-            history = []
+        def calculate_denoising_strength(loop):
+            strength = initial_denoising_strength
if loops == 1:
return strength
progress = loop / (loops - 1)
if denoising_curve == "Aggressive":
strength = math.sin((progress) * math.pi * 0.5)
elif denoising_curve == "Lazy":
strength = 1 - math.cos((progress) * math.pi * 0.5)
else:
strength = progress
change = (final_denoising_strength - initial_denoising_strength) * strength
return initial_denoising_strength + change
history = []
for n in range(batch_count):
# Reset to original init image at the start of each batch # Reset to original init image at the start of each batch
p.init_images = original_init_image p.init_images = original_init_image
# Reset to original denoising strength
p.denoising_strength = initial_denoising_strength
last_image = None
for i in range(loops): for i in range(loops):
p.n_iter = 1 p.n_iter = 1
p.batch_size = 1 p.batch_size = 1
@ -58,30 +83,57 @@ class Script(scripts.Script):
if opts.img2img_color_correction: if opts.img2img_color_correction:
p.color_corrections = initial_color_corrections p.color_corrections = initial_color_corrections
if append_interrogation != "None":
p.prompt = original_prompt + ", " if original_prompt != "" else ""
if append_interrogation == "CLIP":
p.prompt += shared.interrogator.interrogate(p.init_images[0])
elif append_interrogation == "DeepBooru":
p.prompt += deepbooru.model.tag(p.init_images[0])
state.job = f"Iteration {i + 1}/{loops}, batch {n + 1}/{batch_count}" state.job = f"Iteration {i + 1}/{loops}, batch {n + 1}/{batch_count}"
processed = processing.process_images(p) processed = processing.process_images(p)
# Generation cancelled.
if state.interrupted:
break
if initial_seed is None: if initial_seed is None:
initial_seed = processed.seed initial_seed = processed.seed
initial_info = processed.info initial_info = processed.info
init_img = processed.images[0]
p.init_images = [init_img]
p.seed = processed.seed + 1 p.seed = processed.seed + 1
-                p.denoising_strength = min(max(p.denoising_strength * denoising_strength_change_factor, 0.1), 1)
+                p.denoising_strength = calculate_denoising_strength(i + 1)
history.append(processed.images[0])
if state.skipped:
break
last_image = processed.images[0]
p.init_images = [last_image]
p.inpainting_fill = 1 # Set "masked content" to "original" for next loop.
if batch_count == 1:
history.append(last_image)
all_images.append(last_image)
if batch_count > 1 and not state.skipped and not state.interrupted:
history.append(last_image)
all_images.append(last_image)
p.inpainting_fill = original_inpainting_fill
if state.interrupted:
break
if len(history) > 1:
grid = images.image_grid(history, rows=1) grid = images.image_grid(history, rows=1)
if opts.grid_save: if opts.grid_save:
images.save_image(grid, p.outpath_grids, "grid", initial_seed, p.prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename, grid=True, p=p) images.save_image(grid, p.outpath_grids, "grid", initial_seed, p.prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename, grid=True, p=p)
-            grids.append(grid)
-            all_images += history
-        if opts.return_grid:
-            all_images = grids + all_images
+            if opts.return_grid:
+                grids.append(grid)
+                all_images = grids + all_images
processed = Processed(p, all_images, initial_seed, initial_info) processed = Processed(p, all_images, initial_seed, initial_info)
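The new loopback schedule interpolates from the slider's initial denoising strength towards the chosen final strength, with the curve controlling how quickly it moves. A standalone restatement of calculate_denoising_strength to get a feel for the numbers (0.75 and 0.3 are made-up slider values; the script calls it with i + 1, so loop counts from 1):

```python
import math

def calculate_denoising_strength(loop, loops, initial, final, curve):
    # Standalone copy of the schedule introduced above.
    if loops == 1:
        return initial
    progress = loop / (loops - 1)
    if curve == "Aggressive":
        shape = math.sin(progress * math.pi * 0.5)
    elif curve == "Lazy":
        shape = 1 - math.cos(progress * math.pi * 0.5)
    else:                       # "Linear"
        shape = progress
    return initial + (final - initial) * shape

for curve in ("Aggressive", "Linear", "Lazy"):
    values = [round(calculate_denoising_strength(i, 4, 0.75, 0.3, curve), 3) for i in range(1, 4)]
    print(curve, values)
```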
@ -17,22 +17,24 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
def ui(self): def ui(self):
selected_tab = gr.State(value=0) selected_tab = gr.State(value=0)
with gr.Tabs(elem_id="extras_resize_mode"): with gr.Column():
with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by: with FormRow():
upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") with gr.Tabs(elem_id="extras_resize_mode"):
with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by:
upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize")
with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to:
with FormRow(): with FormRow():
upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w") upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w")
upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h") upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h")
upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
with FormRow(): with FormRow():
extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
with FormRow(): with FormRow():
extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility")
tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab])
tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab])
@ -104,3 +106,28 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
def image_changed(self): def image_changed(self):
upscale_cache.clear() upscale_cache.clear()
class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
name = "Simple Upscale"
order = 900
def ui(self):
with FormRow():
upscaler_name = gr.Dropdown(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
upscale_by = gr.Slider(minimum=0.05, maximum=8.0, step=0.05, label="Upscale by", value=2)
return {
"upscale_by": upscale_by,
"upscaler_name": upscaler_name,
}
def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
if upscaler_name is None or upscaler_name == "None":
return
upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None)
assert upscaler1, f'could not find upscaler named {upscaler_name}'
pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False)
pp.info[f"Postprocess upscaler"] = upscaler1.name
@ -44,16 +44,34 @@ class Script(scripts.Script):
def title(self): def title(self):
return "Prompt matrix" return "Prompt matrix"
def ui(self, is_img2img): def ui(self, is_img2img):
-        put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start"))
-        different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
+        gr.HTML('<br />')
+        with gr.Row():
+            with gr.Column():
+                put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start"))
+                different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
+            with gr.Column():
+                prompt_type = gr.Radio(["positive", "negative"], label="Select prompt", elem_id=self.elem_id("prompt_type"), value="positive")
+                variations_delimiter = gr.Radio(["comma", "space"], label="Select joining char", elem_id=self.elem_id("variations_delimiter"), value="comma")
+            with gr.Column():
+                margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size"))

-        return [put_at_start, different_seeds]
+        return [put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size]

-    def run(self, p, put_at_start, different_seeds):
+    def run(self, p, put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size):
modules.processing.fix_seed(p) modules.processing.fix_seed(p)
# Raise error if promp type is not positive or negative
if prompt_type not in ["positive", "negative"]:
raise ValueError(f"Unknown prompt type {prompt_type}")
# Raise error if variations delimiter is not comma or space
if variations_delimiter not in ["comma", "space"]:
raise ValueError(f"Unknown variations delimiter {variations_delimiter}")
-        original_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
+        prompt = p.prompt if prompt_type == "positive" else p.negative_prompt
+        original_prompt = prompt[0] if type(prompt) == list else prompt
+        positive_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
+        delimiter = ", " if variations_delimiter == "comma" else " "
all_prompts = [] all_prompts = []
prompt_matrix_parts = original_prompt.split("|") prompt_matrix_parts = original_prompt.split("|")
@ -66,20 +84,23 @@ class Script(scripts.Script):
else: else:
selected_prompts = [prompt_matrix_parts[0]] + selected_prompts selected_prompts = [prompt_matrix_parts[0]] + selected_prompts
all_prompts.append(", ".join(selected_prompts)) all_prompts.append(delimiter.join(selected_prompts))
p.n_iter = math.ceil(len(all_prompts) / p.batch_size) p.n_iter = math.ceil(len(all_prompts) / p.batch_size)
p.do_not_save_grid = True p.do_not_save_grid = True
print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.") print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.")
-        p.prompt = all_prompts
+        if prompt_type == "positive":
+            p.prompt = all_prompts
+        else:
+            p.negative_prompt = all_prompts
p.seed = [p.seed + (i if different_seeds else 0) for i in range(len(all_prompts))] p.seed = [p.seed + (i if different_seeds else 0) for i in range(len(all_prompts))]
-        p.prompt_for_display = original_prompt
+        p.prompt_for_display = positive_prompt
processed = process_images(p) processed = process_images(p)
grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2)) grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
-        grid = images.draw_prompt_matrix(grid, p.width, p.height, prompt_matrix_parts)
+        grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[0].height, prompt_matrix_parts, margin_size)
processed.images.insert(0, grid) processed.images.insert(0, grid)
processed.index_of_first_image = 1 processed.index_of_first_image = 1
processed.infotexts.insert(0, processed.infotexts[0]) processed.infotexts.insert(0, processed.infotexts[0])
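Prompt matrix splits the selected prompt on "|" and generates one image per subset of the variable parts, joined to the base prompt with the chosen delimiter (comma or space). A small illustration of that expansion (the prompt is invented; the ordering here comes from itertools and may differ from the script's bitmask loop):

```python
from itertools import combinations

original_prompt = "a house|at night|oil painting"
delimiter = ", "                                  # "comma"; a single space when "space" is selected

parts = original_prompt.split("|")
base, variable = parts[0], [s.strip() for s in parts[1:]]

all_prompts = [delimiter.join([base, *combo])
               for r in range(len(variable) + 1)
               for combo in combinations(variable, r)]

print(len(all_prompts))   # 2 ** (number of variable parts) = 4
print(all_prompts)
```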
@ -25,6 +25,8 @@ from modules.ui_components import ToolButton
fill_values_symbol = "\U0001f4d2" # 📒 fill_values_symbol = "\U0001f4d2" # 📒
AxisInfo = namedtuple('AxisInfo', ['axis', 'values'])
def apply_field(field): def apply_field(field):
def fun(p, x, xs): def fun(p, x, xs):
@ -123,7 +125,25 @@ def apply_vae(p, x, xs):
def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _): def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _):
-    p.styles = x.split(',')
+    p.styles.extend(x.split(','))
def apply_uni_pc_order(p, x, xs):
opts.data["uni_pc_order"] = min(x, p.steps - 1)
def apply_face_restore(p, opt, x):
opt = opt.lower()
if opt == 'codeformer':
is_active = True
p.face_restoration_model = 'CodeFormer'
elif opt == 'gfpgan':
is_active = True
p.face_restoration_model = 'GFPGAN'
else:
is_active = opt in ('true', 'yes', 'y', '1')
p.restore_faces = is_active
def format_value_add_label(p, opt, x): def format_value_add_label(p, opt, x):
@ -186,6 +206,7 @@ axis_options = [
AxisOption("Steps", int, apply_field("steps")), AxisOption("Steps", int, apply_field("steps")),
AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")), AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")),
AxisOption("CFG Scale", float, apply_field("cfg_scale")), AxisOption("CFG Scale", float, apply_field("cfg_scale")),
AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")),
AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value), AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value),
AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list),
AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]),
@ -202,69 +223,119 @@ axis_options = [
AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")), AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)), AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: list(sd_vae.vae_dict)),
AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)), AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),
AxisOption("UniPC Order", int, apply_uni_pc_order, cost=0.5),
AxisOption("Face restore", str, apply_face_restore, format_value=format_value),
] ]
-def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend, include_lone_images, swap_axes_processing_order):
+def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend, include_lone_images, include_sub_grids, first_axes_processed, second_axes_processed, margin_size):
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels] hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
title_texts = [[images.GridAnnotation(z)] for z in z_labels]
-    # Temporary list of all the images that are generated to be populated into the grid.
-    # Will be filled with empty images for any individual step that fails to process properly
-    image_cache = [None] * (len(xs) * len(ys))
+    list_size = (len(xs) * len(ys) * len(zs))
processed_result = None processed_result = None
cell_mode = "P"
cell_size = (1, 1)
-    state.job_count = len(xs) * len(ys) * p.n_iter
+    state.job_count = list_size * p.n_iter
-    def process_cell(x, y, ix, iy):
-        nonlocal image_cache, processed_result, cell_mode, cell_size
-        state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
-        processed: Processed = cell(x, y)
-        try:
+    def process_cell(x, y, z, ix, iy, iz):
+        nonlocal processed_result
+
+        def index(ix, iy, iz):
+            return ix + iy * len(xs) + iz * len(xs) * len(ys)
+
+        state.job = f"{index(ix, iy, iz) + 1} out of {list_size}"
+
+        processed: Processed = cell(x, y, z, ix, iy, iz)
# this dereference will throw an exception if the image was not processed
# (this happens in cases such as if the user stops the process from the UI)
processed_image = processed.images[0]
if processed_result is None: if processed_result is None:
# Use our first valid processed result as a template container to hold our full results # Use our first processed result object as a template container to hold our full results
processed_result = copy(processed) processed_result = copy(processed)
cell_mode = processed_image.mode processed_result.images = [None] * list_size
cell_size = processed_image.size processed_result.all_prompts = [None] * list_size
processed_result.images = [Image.new(cell_mode, cell_size)] processed_result.all_seeds = [None] * list_size
processed_result.infotexts = [None] * list_size
processed_result.index_of_first_image = 1
image_cache[ix + iy * len(xs)] = processed_image idx = index(ix, iy, iz)
if include_lone_images: if processed.images:
processed_result.images.append(processed_image) # Non-empty list indicates some degree of success.
processed_result.all_prompts.append(processed.prompt) processed_result.images[idx] = processed.images[0]
processed_result.all_seeds.append(processed.seed) processed_result.all_prompts[idx] = processed.prompt
processed_result.infotexts.append(processed.infotexts[0]) processed_result.all_seeds[idx] = processed.seed
except: processed_result.infotexts[idx] = processed.infotexts[0]
image_cache[ix + iy * len(xs)] = Image.new(cell_mode, cell_size) else:
cell_mode = "P"
cell_size = (processed_result.width, processed_result.height)
if processed_result.images[0] is not None:
cell_mode = processed_result.images[0].mode
#This corrects size in case of batches:
cell_size = processed_result.images[0].size
processed_result.images[idx] = Image.new(cell_mode, cell_size)
if swap_axes_processing_order:
if first_axes_processed == 'x':
for ix, x in enumerate(xs): for ix, x in enumerate(xs):
for iy, y in enumerate(ys): if second_axes_processed == 'y':
process_cell(x, y, ix, iy) for iy, y in enumerate(ys):
else: for iz, z in enumerate(zs):
process_cell(x, y, z, ix, iy, iz)
else:
for iz, z in enumerate(zs):
for iy, y in enumerate(ys):
process_cell(x, y, z, ix, iy, iz)
elif first_axes_processed == 'y':
for iy, y in enumerate(ys): for iy, y in enumerate(ys):
for ix, x in enumerate(xs): if second_axes_processed == 'x':
process_cell(x, y, ix, iy) for ix, x in enumerate(xs):
for iz, z in enumerate(zs):
process_cell(x, y, z, ix, iy, iz)
else:
for iz, z in enumerate(zs):
for ix, x in enumerate(xs):
process_cell(x, y, z, ix, iy, iz)
elif first_axes_processed == 'z':
for iz, z in enumerate(zs):
if second_axes_processed == 'x':
for ix, x in enumerate(xs):
for iy, y in enumerate(ys):
process_cell(x, y, z, ix, iy, iz)
else:
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
process_cell(x, y, z, ix, iy, iz)
if not processed_result: if not processed_result:
print("Unexpected error: draw_xy_grid failed to return even a single processed image") # Should never happen, I've only seen it on one of four open tabs and it needed to refresh.
print("Unexpected error: Processing could not begin, you may need to refresh the tab or restart the service.")
return Processed(p, [])
elif not any(processed_result.images):
print("Unexpected error: draw_xyz_grid failed to return even a single processed image")
return Processed(p, []) return Processed(p, [])
grid = images.image_grid(image_cache, rows=len(ys)) z_count = len(zs)
if draw_legend: sub_grids = [None] * z_count
grid = images.draw_grid_annotations(grid, cell_size[0], cell_size[1], hor_texts, ver_texts) for i in range(z_count):
start_index = (i * len(xs) * len(ys)) + i
end_index = start_index + len(xs) * len(ys)
grid = images.image_grid(processed_result.images[start_index:end_index], rows=len(ys))
if draw_legend:
grid = images.draw_grid_annotations(grid, processed_result.images[start_index].size[0], processed_result.images[start_index].size[1], hor_texts, ver_texts, margin_size)
processed_result.images.insert(i, grid)
processed_result.all_prompts.insert(i, processed_result.all_prompts[start_index])
processed_result.all_seeds.insert(i, processed_result.all_seeds[start_index])
processed_result.infotexts.insert(i, processed_result.infotexts[start_index])
processed_result.images[0] = grid sub_grid_size = processed_result.images[0].size
z_grid = images.image_grid(processed_result.images[:z_count], rows=1)
if draw_legend:
z_grid = images.draw_grid_annotations(z_grid, sub_grid_size[0], sub_grid_size[1], title_texts, [[images.GridAnnotation()]])
processed_result.images.insert(0, z_grid)
#TODO: Deeper aspects of the program rely on grid info being misaligned between metadata arrays, which is not ideal.
#processed_result.all_prompts.insert(0, processed_result.all_prompts[0])
#processed_result.all_seeds.insert(0, processed_result.all_seeds[0])
processed_result.infotexts.insert(0, processed_result.infotexts[0])
return processed_result return processed_result
@ -273,9 +344,11 @@ class SharedSettingsStackHelper(object):
def __enter__(self): def __enter__(self):
self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
self.vae = opts.sd_vae self.vae = opts.sd_vae
self.uni_pc_order = opts.uni_pc_order
def __exit__(self, exc_type, exc_value, tb): def __exit__(self, exc_type, exc_value, tb):
opts.data["sd_vae"] = self.vae opts.data["sd_vae"] = self.vae
opts.data["uni_pc_order"] = self.uni_pc_order
modules.sd_models.reload_model_weights() modules.sd_models.reload_model_weights()
modules.sd_vae.reload_vae_weights() modules.sd_vae.reload_vae_weights()
@ -291,7 +364,7 @@ re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+
class Script(scripts.Script): class Script(scripts.Script):
def title(self): def title(self):
return "X/Y plot" return "X/Y/Z plot"
def ui(self, is_img2img): def ui(self, is_img2img):
self.current_axis_options = [x for x in axis_options if type(x) == AxisOption or x.is_img2img == is_img2img] self.current_axis_options = [x for x in axis_options if type(x) == AxisOption or x.is_img2img == is_img2img]
@ -301,24 +374,42 @@ class Script(scripts.Script):
with gr.Row(): with gr.Row():
x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type")) x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type"))
x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values")) x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values"))
fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xy_grid_fill_x_tool_button", visible=False) fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False)
with gr.Row(): with gr.Row():
y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type")) y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type"))
y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values")) y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values"))
fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xy_grid_fill_y_tool_button", visible=False) fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False)
with gr.Row():
z_type = gr.Dropdown(label="Z type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type"))
z_values = gr.Textbox(label="Z values", lines=1, elem_id=self.elem_id("z_values"))
fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False)
with gr.Row(variant="compact", elem_id="axis_options"): with gr.Row(variant="compact", elem_id="axis_options"):
draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend")) with gr.Column():
include_lone_images = gr.Checkbox(label='Include Separate Images', value=False, elem_id=self.elem_id("include_lone_images")) draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend"))
no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=self.elem_id("no_fixed_seeds")) no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=self.elem_id("no_fixed_seeds"))
swap_axes_button = gr.Button(value="Swap axes", elem_id="xy_grid_swap_axes_button") with gr.Column():
include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images"))
include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids"))
with gr.Column():
margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size"))
with gr.Row(variant="compact", elem_id="swap_axes"):
swap_xy_axes_button = gr.Button(value="Swap X/Y axes", elem_id="xy_grid_swap_axes_button")
swap_yz_axes_button = gr.Button(value="Swap Y/Z axes", elem_id="yz_grid_swap_axes_button")
swap_xz_axes_button = gr.Button(value="Swap X/Z axes", elem_id="xz_grid_swap_axes_button")
def swap_axes(x_type, x_values, y_type, y_values): def swap_axes(axis1_type, axis1_values, axis2_type, axis2_values):
return self.current_axis_options[y_type].label, y_values, self.current_axis_options[x_type].label, x_values return self.current_axis_options[axis2_type].label, axis2_values, self.current_axis_options[axis1_type].label, axis1_values
swap_args = [x_type, x_values, y_type, y_values] xy_swap_args = [x_type, x_values, y_type, y_values]
swap_axes_button.click(swap_axes, inputs=swap_args, outputs=swap_args) swap_xy_axes_button.click(swap_axes, inputs=xy_swap_args, outputs=xy_swap_args)
yz_swap_args = [y_type, y_values, z_type, z_values]
swap_yz_axes_button.click(swap_axes, inputs=yz_swap_args, outputs=yz_swap_args)
xz_swap_args = [x_type, x_values, z_type, z_values]
swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args)
def fill(x_type): def fill(x_type):
axis = self.current_axis_options[x_type] axis = self.current_axis_options[x_type]
@ -326,16 +417,27 @@ class Script(scripts.Script):
fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values]) fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values])
fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values]) fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values])
fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values])
def select_axis(x_type): def select_axis(x_type):
return gr.Button.update(visible=self.current_axis_options[x_type].choices is not None) return gr.Button.update(visible=self.current_axis_options[x_type].choices is not None)
x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button]) x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button])
y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button]) y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button])
z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button])
-        return [x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds]
+        self.infotext_fields = (
+            (x_type, "X Type"),
+            (x_values, "X Values"),
+            (y_type, "Y Type"),
+            (y_values, "Y Values"),
+            (z_type, "Z Type"),
+            (z_values, "Z Values"),
+        )
+
+        return [x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size]

-    def run(self, p, x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds):
+    def run(self, p, x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size):
if not no_fixed_seeds: if not no_fixed_seeds:
modules.processing.fix_seed(p) modules.processing.fix_seed(p)
@ -346,7 +448,7 @@ class Script(scripts.Script):
if opt.label == 'Nothing': if opt.label == 'Nothing':
return [0] return [0]
-            valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
+            valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x]
if opt.type == int: if opt.type == int:
valslist_ext = [] valslist_ext = []
@ -409,6 +511,14 @@ class Script(scripts.Script):
y_opt = self.current_axis_options[y_type] y_opt = self.current_axis_options[y_type]
ys = process_axis(y_opt, y_values) ys = process_axis(y_opt, y_values)
z_opt = self.current_axis_options[z_type]
zs = process_axis(z_opt, z_values)
# this could be moved to common code, but unlikely to be ever triggered anywhere else
Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes
grid_mp = round(len(xs) * len(ys) * len(zs) * p.width * p.height / 1000000)
assert grid_mp < opts.img_max_size_mp, f'Error: Resulting grid would be too large ({grid_mp} MPixels) (max configured size is {opts.img_max_size_mp} MPixels)'
def fix_axis_seeds(axis_opt, axis_list): def fix_axis_seeds(axis_opt, axis_list):
if axis_opt.label in ['Seed', 'Var. seed']: if axis_opt.label in ['Seed', 'Var. seed']:
return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list] return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
@ -418,21 +528,26 @@ class Script(scripts.Script):
if not no_fixed_seeds: if not no_fixed_seeds:
xs = fix_axis_seeds(x_opt, xs) xs = fix_axis_seeds(x_opt, xs)
ys = fix_axis_seeds(y_opt, ys) ys = fix_axis_seeds(y_opt, ys)
zs = fix_axis_seeds(z_opt, zs)
if x_opt.label == 'Steps': if x_opt.label == 'Steps':
total_steps = sum(xs) * len(ys) total_steps = sum(xs) * len(ys) * len(zs)
elif y_opt.label == 'Steps': elif y_opt.label == 'Steps':
total_steps = sum(ys) * len(xs) total_steps = sum(ys) * len(xs) * len(zs)
elif z_opt.label == 'Steps':
total_steps = sum(zs) * len(xs) * len(ys)
else: else:
total_steps = p.steps * len(xs) * len(ys) total_steps = p.steps * len(xs) * len(ys) * len(zs)
if isinstance(p, StableDiffusionProcessingTxt2Img) and p.enable_hr: if isinstance(p, StableDiffusionProcessingTxt2Img) and p.enable_hr:
if x_opt.label == "Hires steps": if x_opt.label == "Hires steps":
total_steps += sum(xs) * len(ys) total_steps += sum(xs) * len(ys) * len(zs)
elif y_opt.label == "Hires steps": elif y_opt.label == "Hires steps":
total_steps += sum(ys) * len(xs) total_steps += sum(ys) * len(xs) * len(zs)
elif z_opt.label == "Hires steps":
total_steps += sum(zs) * len(xs) * len(ys)
elif p.hr_second_pass_steps: elif p.hr_second_pass_steps:
total_steps += p.hr_second_pass_steps * len(xs) * len(ys) total_steps += p.hr_second_pass_steps * len(xs) * len(ys) * len(zs)
else: else:
total_steps *= 2 total_steps *= 2
@ -440,28 +555,57 @@ class Script(scripts.Script):
image_cell_count = p.n_iter * p.batch_size image_cell_count = p.n_iter * p.batch_size
cell_console_text = f"; {image_cell_count} images per cell" if image_cell_count > 1 else "" cell_console_text = f"; {image_cell_count} images per cell" if image_cell_count > 1 else ""
print(f"X/Y plot will create {len(xs) * len(ys) * image_cell_count} images on a {len(xs)}x{len(ys)} grid{cell_console_text}. (Total steps to process: {total_steps})") plural_s = 's' if len(zs) > 1 else ''
print(f"X/Y/Z plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on {len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})")
shared.total_tqdm.updateTotal(total_steps) shared.total_tqdm.updateTotal(total_steps)
grid_infotext = [None] state.xyz_plot_x = AxisInfo(x_opt, xs)
state.xyz_plot_y = AxisInfo(y_opt, ys)
state.xyz_plot_z = AxisInfo(z_opt, zs)
# If one of the axes is very slow to change between (like SD model # If one of the axes is very slow to change between (like SD model
# checkpoint), then make sure it is in the outer iteration of the nested # checkpoint), then make sure it is in the outer iteration of the nested
# `for` loop. # `for` loop.
swap_axes_processing_order = x_opt.cost > y_opt.cost first_axes_processed = 'z'
second_axes_processed = 'y'
if x_opt.cost > y_opt.cost and x_opt.cost > z_opt.cost:
first_axes_processed = 'x'
if y_opt.cost > z_opt.cost:
second_axes_processed = 'y'
else:
second_axes_processed = 'z'
elif y_opt.cost > x_opt.cost and y_opt.cost > z_opt.cost:
first_axes_processed = 'y'
if x_opt.cost > z_opt.cost:
second_axes_processed = 'x'
else:
second_axes_processed = 'z'
elif z_opt.cost > x_opt.cost and z_opt.cost > y_opt.cost:
first_axes_processed = 'z'
if x_opt.cost > y_opt.cost:
second_axes_processed = 'x'
else:
second_axes_processed = 'y'
-        def cell(x, y):
+        grid_infotext = [None] * (1 + len(zs))
+
+        def cell(x, y, z, ix, iy, iz):
if shared.state.interrupted: if shared.state.interrupted:
return Processed(p, [], p.seed, "") return Processed(p, [], p.seed, "")
pc = copy(p) pc = copy(p)
pc.styles = pc.styles[:]
x_opt.apply(pc, x, xs) x_opt.apply(pc, x, xs)
y_opt.apply(pc, y, ys) y_opt.apply(pc, y, ys)
z_opt.apply(pc, z, zs)
res = process_images(pc) res = process_images(pc)
-            if grid_infotext[0] is None:
+            # Sets subgrid infotexts
+            subgrid_index = 1 + iz
+            if grid_infotext[subgrid_index] is None and ix == 0 and iy == 0:
pc.extra_generation_params = copy(pc.extra_generation_params) pc.extra_generation_params = copy(pc.extra_generation_params)
pc.extra_generation_params['Script'] = self.title()
if x_opt.label != 'Nothing': if x_opt.label != 'Nothing':
pc.extra_generation_params["X Type"] = x_opt.label pc.extra_generation_params["X Type"] = x_opt.label
@ -475,24 +619,67 @@ class Script(scripts.Script):
if y_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds: if y_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys]) pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys])
grid_infotext[subgrid_index] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds)
# Sets main grid infotext
if grid_infotext[0] is None and ix == 0 and iy == 0 and iz == 0:
pc.extra_generation_params = copy(pc.extra_generation_params)
if z_opt.label != 'Nothing':
pc.extra_generation_params["Z Type"] = z_opt.label
pc.extra_generation_params["Z Values"] = z_values
if z_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
pc.extra_generation_params["Fixed Z Values"] = ", ".join([str(z) for z in zs])
grid_infotext[0] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds) grid_infotext[0] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds)
return res return res
with SharedSettingsStackHelper(): with SharedSettingsStackHelper():
-            processed = draw_xy_grid(
+            processed = draw_xyz_grid(
p, p,
xs=xs, xs=xs,
ys=ys, ys=ys,
zs=zs,
x_labels=[x_opt.format_value(p, x_opt, x) for x in xs], x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
y_labels=[y_opt.format_value(p, y_opt, y) for y in ys], y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
z_labels=[z_opt.format_value(p, z_opt, z) for z in zs],
cell=cell, cell=cell,
draw_legend=draw_legend, draw_legend=draw_legend,
include_lone_images=include_lone_images, include_lone_images=include_lone_images,
-                swap_axes_processing_order=swap_axes_processing_order
+                include_sub_grids=include_sub_grids,
+                first_axes_processed=first_axes_processed,
+                second_axes_processed=second_axes_processed,
+                margin_size=margin_size
) )
if not processed.images:
# It broke, no further handling needed.
return processed
z_count = len(zs)
# Set the grid infotexts to the real ones with extra_generation_params (1 main grid + z_count sub-grids)
processed.infotexts[:1+z_count] = grid_infotext[:1+z_count]
if not include_lone_images:
# Don't need sub-images anymore, drop from list:
processed.images = processed.images[:z_count+1]
if opts.grid_save: if opts.grid_save:
images.save_image(processed.images[0], p.outpath_grids, "xy_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p) # Auto-save main and sub-grids:
grid_count = z_count + 1 if z_count > 1 else 1
for g in range(grid_count):
#TODO: See previous comment about intentional data misalignment.
adj_g = g-1 if g > 0 else g
images.save_image(processed.images[g], p.outpath_grids, "xyz_grid", info=processed.infotexts[g], extension=opts.grid_format, prompt=processed.all_prompts[adj_g], seed=processed.all_seeds[adj_g], grid=True, p=processed)
if not include_sub_grids:
# Done with sub-grids, drop all related information:
for sg in range(z_count):
del processed.images[1]
del processed.all_prompts[1]
del processed.all_seeds[1]
del processed.infotexts[1]
return processed return processed
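draw_xyz_grid stores every cell in one flat list, with x varying fastest, then y, then z; the index helper inside process_cell computes the slot, and the per-z sub-grid slicing later relies on the same layout. A tiny sketch of the mapping with made-up axis values:

```python
xs, ys, zs = [1, 2, 3], ["a", "b"], [0.1, 0.2]     # 3 x 2 x 2 = 12 cells

def index(ix, iy, iz):
    # Same flattening as in process_cell: x fastest, then y, then z.
    return ix + iy * len(xs) + iz * len(xs) * len(ys)

list_size = len(xs) * len(ys) * len(zs)
assert index(len(xs) - 1, len(ys) - 1, len(zs) - 1) == list_size - 1

# Cells for a fixed z occupy one contiguous block of len(xs) * len(ys) slots,
# which is what the per-z sub-grid slicing works from.
print([index(ix, iy, 1) for iy in range(len(ys)) for ix in range(len(xs))])
```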
style.css: 914 lines changed (diff suppressed because it is too large)
@ -1,7 +1,9 @@
import os
import unittest import unittest
import requests import requests
from gradio.processing_utils import encode_pil_to_base64 from gradio.processing_utils import encode_pil_to_base64
from PIL import Image from PIL import Image
from modules.paths import script_path
class TestExtrasWorking(unittest.TestCase): class TestExtrasWorking(unittest.TestCase):
def setUp(self): def setUp(self):
@ -19,7 +21,7 @@ class TestExtrasWorking(unittest.TestCase):
"upscaler_1": "None", "upscaler_1": "None",
"upscaler_2": "None", "upscaler_2": "None",
"extras_upscaler_2_visibility": 0, "extras_upscaler_2_visibility": 0,
"image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")) "image": encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png")))
} }
def test_simple_upscaling_performed(self): def test_simple_upscaling_performed(self):
@ -31,7 +33,7 @@ class TestPngInfoWorking(unittest.TestCase):
def setUp(self): def setUp(self):
self.url_png_info = "http://localhost:7860/sdapi/v1/extra-single-image" self.url_png_info = "http://localhost:7860/sdapi/v1/extra-single-image"
self.png_info = { self.png_info = {
"image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")) "image": encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png")))
} }
def test_png_info_performed(self): def test_png_info_performed(self):
@ -42,7 +44,7 @@ class TestInterrogateWorking(unittest.TestCase):
def setUp(self): def setUp(self):
self.url_interrogate = "http://localhost:7860/sdapi/v1/extra-single-image" self.url_interrogate = "http://localhost:7860/sdapi/v1/extra-single-image"
self.interrogate = { self.interrogate = {
"image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")), "image": encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png"))),
"model": "clip" "model": "clip"
} }
Some files were not shown because too many files have changed in this diff.