From 1a59e4dacd743129f277ffa9dd4a0b22dff3cb48 Mon Sep 17 00:00:00 2001 From: Redstone1024 <2824517378@qq.com> Date: Thu, 10 Aug 2023 01:28:42 +0800 Subject: [PATCH] replace download source --- .github/ISSUE_TEMPLATE/config.yml | 2 +- .github/pull_request_template.md | 6 +- CODEOWNERS | 2 +- README.md | 84 +++++++++---------- configs/instruct-pix2pix.yaml | 2 +- .../LDSR/sd_hijack_autoencoder.py | 2 +- .../ScuNET/scripts/scunet_model.py | 4 +- .../SwinIR/scripts/swinir_model.py | 2 +- html/footer.html | 2 +- html/licenses.html | 22 ++--- javascript/imageMaskFix.js | 4 +- modules/codeformer/vqgan_arch.py | 2 +- modules/codeformer_model.py | 2 +- modules/deepbooru.py | 2 +- modules/deepbooru_model.py | 2 +- modules/devices.py | 2 +- modules/errors.py | 2 +- modules/esrgan_model.py | 10 +-- modules/esrgan_model_arch.py | 4 +- modules/gfpgan_model.py | 2 +- modules/hashes.py | 2 +- modules/launch_utils.py | 20 ++--- modules/mac_specific.py | 16 ++-- modules/models/diffusion/ddpm_edit.py | 8 +- modules/ngrok.py | 2 +- modules/realesrgan_model.py | 12 +-- modules/sd_hijack_optimizations.py | 12 +-- modules/sd_models.py | 8 +- modules/sd_vae_approx.py | 2 +- modules/sd_vae_taesd.py | 4 +- modules/shared.py | 14 ++-- modules/sub_quadratic_attention.py | 2 +- modules/textual_inversion/autocrop.py | 2 +- modules/ui.py | 8 +- modules/ui_extensions.py | 2 +- scripts/img2imgalt.py | 2 +- scripts/outpainting_mk_2.py | 2 +- webui-macos-env.sh | 2 +- webui.sh | 2 +- 39 files changed, 141 insertions(+), 141 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f58c94a9..03b9cb28 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,5 +1,5 @@ blank_issues_enabled: false contact_links: - name: WebUI Community Support - url: https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions + url: https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions about: Please 
ask and answer questions here. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c9fcda2e..fe9aa2ff 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -9,7 +9,7 @@ ## Checklist: -- [ ] I have read [contributing wiki page](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) +- [ ] I have read [contributing wiki page](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) - [ ] I have performed a self-review of my own code -- [ ] My code follows the [style guidelines](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style) -- [ ] My code passes [tests](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests) +- [ ] My code follows the [style guidelines](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing#code-style) +- [ ] My code passes [tests](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Tests) diff --git a/CODEOWNERS b/CODEOWNERS index 7438c9bc..7d390c1d 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -2,7 +2,7 @@ # if you were managing a localization and were removed from this file, this is because # the intended way to do localizations now is via extensions. See: -# https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions +# https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions # Make a repo with your localization and since you are still listed as a collaborator # you can add it to the wiki page yourself. This change is because some people complained # the git commit log is cluttered with things unrelated to almost everyone and diff --git a/README.md b/README.md index b796d150..43b13911 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ A browser interface based on Gradio library for Stable Diffusion. 
![](screenshot.png) ## Features -[Detailed feature showcase with images](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features): +[Detailed feature showcase with images](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features): - Original txt2img and img2img modes - One click install and run script (but you still must install python and git) - Outpainting @@ -28,7 +28,7 @@ A browser interface based on Gradio library for Stable Diffusion. - CodeFormer, face restoration tool as an alternative to GFPGAN - RealESRGAN, neural network upscaler - ESRGAN, neural network upscaler with a lot of third party models - - SwinIR and Swin2SR ([see here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/2092)), neural network upscalers + - SwinIR and Swin2SR ([see here](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/2092)), neural network upscalers - LDSR, Latent diffusion super resolution upscaling - Resizing aspect ratio options - Sampling method selection @@ -63,14 +63,14 @@ A browser interface based on Gradio library for Stable Diffusion. 
- Highres Fix, a convenience option to produce high resolution pictures in one click without usual distortions - Reloading checkpoints on the fly - Checkpoint Merger, a tab that allows you to merge up to 3 checkpoints into one -- [Custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) with many extensions from community +- [Custom scripts](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) with many extensions from community - [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once - separate prompts using uppercase `AND` - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2` - No token limit for prompts (original stable diffusion lets you use up to 75 tokens) - DeepDanbooru integration, creates danbooru style tags for anime prompts -- [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add `--xformers` to commandline args) -- via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI +- [xformers](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards: (add `--xformers` to commandline args) +- via extension: [History tab](https://ghproxy.com/https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI - Generate forever option - Training tab - hypernetworks and embeddings options @@ -82,10 +82,10 @@ A browser interface based on Gradio library for Stable Diffusion. 
- Can select to load a different VAE from settings screen - Estimated completion time in progress bar - API -- Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML -- via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients)) -- [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions -- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions +- Support for dedicated [inpainting model](https://ghproxy.com/https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML +- via extension: [Aesthetic Gradients](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip images embeds (implementation of [https://ghproxy.com/https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://ghproxy.com/https://github.com/vicgalle/stable-diffusion-aesthetic-gradients)) +- [Stable Diffusion 2.0](https://ghproxy.com/https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions +- [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions - Now without any bad 
letters! - Load checkpoints in safetensors format - Eased resolution restriction: generated image's domension must be a multiple of 8 rather than 64 @@ -93,22 +93,22 @@ A browser interface based on Gradio library for Stable Diffusion. - Reorder elements in the UI from settings screen ## Installation and Running -Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs. +Make sure the required [dependencies](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs. Alternatively, use online services (like Google Colab): -- [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) +- [List of Online Services](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) ### Installation on Windows 10/11 with NVidia-GPUs using release package -1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract it's contents. +1. Download `sd.webui.zip` from [v1.0.0-pre](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract it's contents. 2. Run `update.bat`. 3. Run `run.bat`. 
-> For more details see [Install-and-Run-on-NVidia-GPUs](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) +> For more details see [Install-and-Run-on-NVidia-GPUs](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) ### Automatic Installation on Windows 1. Install [Python 3.10.6](https://www.python.org/downloads/release/python-3106/) (Newer version of Python does not support torch), checking "Add Python to PATH". 2. Install [git](https://git-scm.com/download/win). -3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. +3. Download the stable-diffusion-webui repository, for example by running `git clone https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. 4. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. ### Automatic Installation on Linux @@ -129,45 +129,45 @@ bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusio 4. Check `webui-user.sh` for options. ### Installation on Apple Silicon -Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon). +Find the instructions [here](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon). ## Contributing -Here's how to add code to this repo: [Contributing](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) +Here's how to add code to this repo: [Contributing](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing) ## Documentation -The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki). 
+The documentation was moved from this README over to the project's [wiki](https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki). -For the purposes of getting Google and other search engines to crawl the wiki, here's a link to the (not for humans) [crawlable wiki](https://github-wiki-see.page/m/AUTOMATIC1111/stable-diffusion-webui/wiki). +For the purposes of getting Google and other search engines to crawl the wiki, here's a link to the (not for humans) [crawlable wiki](https://github-wiki-see.page/m/AUTOMATIC1111/stable-diffusion-webui/wiki). ## Credits Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file. -- Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers -- k-diffusion - https://github.com/crowsonkb/k-diffusion.git -- GFPGAN - https://github.com/TencentARC/GFPGAN.git -- CodeFormer - https://github.com/sczhou/CodeFormer -- ESRGAN - https://github.com/xinntao/ESRGAN -- SwinIR - https://github.com/JingyunLiang/SwinIR -- Swin2SR - https://github.com/mv-lab/swin2sr -- LDSR - https://github.com/Hafiidz/latent-diffusion -- MiDaS - https://github.com/isl-org/MiDaS -- Ideas for optimizations - https://github.com/basujindal/stable-diffusion -- Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing. -- Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion) -- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san/diffusers/pull/1), Amin Rezaei (https://github.com/AminRezaei0x443/memory-efficient-attention) -- Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas). 
-- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd -- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot -- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator -- Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch -- xformers - https://github.com/facebookresearch/xformers -- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru -- Sampling in float32 precision from a float16 UNet - marunine for the idea, Birch-san for the example Diffusers implementation (https://github.com/Birch-san/diffusers-play/tree/92feee6) -- Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. Efros (no star) - https://github.com/timothybrooks/instruct-pix2pix +- Stable Diffusion - https://ghproxy.com/https://github.com/CompVis/stable-diffusion, https://ghproxy.com/https://github.com/CompVis/taming-transformers +- k-diffusion - https://ghproxy.com/https://github.com/crowsonkb/k-diffusion.git +- GFPGAN - https://ghproxy.com/https://github.com/TencentARC/GFPGAN.git +- CodeFormer - https://ghproxy.com/https://github.com/sczhou/CodeFormer +- ESRGAN - https://ghproxy.com/https://github.com/xinntao/ESRGAN +- SwinIR - https://ghproxy.com/https://github.com/JingyunLiang/SwinIR +- Swin2SR - https://ghproxy.com/https://github.com/mv-lab/swin2sr +- LDSR - https://ghproxy.com/https://github.com/Hafiidz/latent-diffusion +- MiDaS - https://ghproxy.com/https://github.com/isl-org/MiDaS +- Ideas for optimizations - https://ghproxy.com/https://github.com/basujindal/stable-diffusion +- Cross Attention layer optimization - Doggettx - https://ghproxy.com/https://github.com/Doggettx/stable-diffusion, original idea for prompt editing. 
+- Cross Attention layer optimization - InvokeAI, lstein - https://ghproxy.com/https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion) +- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://ghproxy.com/https://github.com/Birch-san/diffusers/pull/1), Amin Rezaei (https://ghproxy.com/https://github.com/AminRezaei0x443/memory-efficient-attention) +- Textual Inversion - Rinon Gal - https://ghproxy.com/https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas). +- Idea for SD upscale - https://ghproxy.com/https://github.com/jquesnelle/txt2imghd +- Noise generation for outpainting mk2 - https://ghproxy.com/https://github.com/parlance-zz/g-diffuser-bot +- CLIP interrogator idea and borrowing some code - https://ghproxy.com/https://github.com/pharmapsychotic/clip-interrogator +- Idea for Composable Diffusion - https://ghproxy.com/https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch +- xformers - https://ghproxy.com/https://github.com/facebookresearch/xformers +- DeepDanbooru - interrogator for anime diffusers https://ghproxy.com/https://github.com/KichangKim/DeepDanbooru +- Sampling in float32 precision from a float16 UNet - marunine for the idea, Birch-san for the example Diffusers implementation (https://ghproxy.com/https://github.com/Birch-san/diffusers-play/tree/92feee6) +- Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. 
Efros (no star) - https://ghproxy.com/https://github.com/timothybrooks/instruct-pix2pix - Security advice - RyotaK -- UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC -- TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd +- UniPC sampler - Wenliang Zhao - https://ghproxy.com/https://github.com/wl-zhao/UniPC +- TAESD - Ollin Boer Bohan - https://ghproxy.com/https://github.com/madebyollin/taesd - LyCORIS - KohakuBlueleaf - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. - (You) diff --git a/configs/instruct-pix2pix.yaml b/configs/instruct-pix2pix.yaml index 4e896879..fcaee3df 100644 --- a/configs/instruct-pix2pix.yaml +++ b/configs/instruct-pix2pix.yaml @@ -1,4 +1,4 @@ -# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion). +# File modified by authors of InstructPix2Pix from original (https://ghproxy.com/https://github.com/CompVis/stable-diffusion). # See more details in LICENSE. 
model: diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py index c29d274d..e8ac827f 100644 --- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py +++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py @@ -147,7 +147,7 @@ class VQModel(pl.LightningModule): return x def training_step(self, batch, batch_idx, optimizer_idx): - # https://github.com/pytorch/pytorch/issues/37142 + # https://ghproxy.com/https://github.com/pytorch/pytorch/issues/37142 # try not to fool the heuristics x = self.get_input(batch, self.image_key) xrec, qloss, ind = self(x, return_pred_indices=True) diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index 167d2f64..5cf28795 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -18,8 +18,8 @@ class UpscalerScuNET(modules.upscaler.Upscaler): self.name = "ScuNET" self.model_name = "ScuNET GAN" self.model_name2 = "ScuNET PSNR" - self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth" - self.model_url2 = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_psnr.pth" + self.model_url = "https://ghproxy.com/https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth" + self.model_url2 = "https://ghproxy.com/https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_psnr.pth" self.user_path = dirname super().__init__() model_paths = self.find_models(ext_filter=[".pth"]) diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index ae0d0e6a..8d2fda9a 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -12,7 +12,7 @@ from swinir_model_arch import SwinIR from swinir_model_arch_v2 import Swin2SR from modules.upscaler import Upscaler, UpscalerData 
-SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth" +SWINIR_MODEL_URL = "https://ghproxy.com/https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth" device_swinir = devices.get_device_for('swinir') diff --git a/html/footer.html b/html/footer.html index 69b2372c..33ae2b5e 100644 --- a/html/footer.html +++ b/html/footer.html @@ -1,7 +1,7 @@
API  •  - Github + Github  •  Gradio  •  diff --git a/html/licenses.html b/html/licenses.html index ef6f2c0a..abedaa2f 100644 --- a/html/licenses.html +++ b/html/licenses.html @@ -4,7 +4,7 @@ #licenses pre { margin: 1em 0 2em 0;} -

CodeFormer

+

CodeFormer

Parts of CodeFormer code had to be copied to be compatible with GFPGAN.
 S-Lab License 1.0
@@ -45,7 +45,7 @@ please contact the contributor(s) of the work.
 
-

ESRGAN

+

ESRGAN

Code for architecture and reading models copied.
 MIT License
@@ -71,7 +71,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
-

Real-ESRGAN

+

Real-ESRGAN

Some code is copied to support ESRGAN models.
 BSD 3-Clause License
@@ -105,7 +105,7 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-

InvokeAI

+

InvokeAI

Some code for compatibility with OSX is taken from lstein's repository.
 MIT License
@@ -131,7 +131,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
-

LDSR

+

LDSR

Code added by contirubtors, most likely copied from this repository.
 MIT License
@@ -157,7 +157,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
-

CLIP Interrogator

+

CLIP Interrogator

Some small amounts of code borrowed and reworked.
 MIT License
@@ -183,7 +183,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
-

SwinIR

+

SwinIR

Code added by contributors, most likely copied from this repository.
@@ -390,7 +390,7 @@ SOFTWARE.
    limitations under the License.
 
-

Memory Efficient Attention

+

Memory Efficient Attention

The sub-quadratic cross attention optimization uses modified code from the Memory Efficient Attention package that Alex Birch optimized for 3D tensors. This license is updated to reflect that.
 MIT License
@@ -417,7 +417,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
-

Scaled Dot Product Attention

+

Scaled Dot Product Attention

Some small amounts of code borrowed and reworked.
    Copyright 2023 The HuggingFace Team. All rights reserved.
@@ -637,7 +637,7 @@ SOFTWARE.
    limitations under the License.
 
-

Curated transformers

+

Curated transformers

The MPS workaround for nn.Linear on macOS 13.2.X is based on the MPS workaround for nn.Linear created by danieldk for Curated transformers
 The MIT License (MIT)
@@ -663,7 +663,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 
-

TAESD

+

TAESD

Tiny AutoEncoder for Stable Diffusion option for live previews
 MIT License
diff --git a/javascript/imageMaskFix.js b/javascript/imageMaskFix.js
index 900c56f3..cdc079f7 100644
--- a/javascript/imageMaskFix.js
+++ b/javascript/imageMaskFix.js
@@ -1,6 +1,6 @@
 /**
- * temporary fix for https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668
- * @see https://github.com/gradio-app/gradio/issues/1721
+ * temporary fix for https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668
+ * @see https://ghproxy.com/https://github.com/gradio-app/gradio/issues/1721
  */
 function imageMaskResize() {
     const canvases = gradioApp().querySelectorAll('#img2maskimg .touch-none canvas');
diff --git a/modules/codeformer/vqgan_arch.py b/modules/codeformer/vqgan_arch.py
index 09ee6660..3e36a2b9 100644
--- a/modules/codeformer/vqgan_arch.py
+++ b/modules/codeformer/vqgan_arch.py
@@ -2,7 +2,7 @@
 
 '''
 VQGAN code, adapted from the original created by the Unleashing Transformers authors:
-https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
+https://ghproxy.com/https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
 
 '''
 import torch
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index da42b5e9..edfa5e56 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -13,7 +13,7 @@ from modules.paths import models_path
 # I am making a choice to include some files from codeformer to work around this issue.
 model_dir = "Codeformer"
 model_path = os.path.join(models_path, model_dir)
-model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
+model_url = 'https://ghproxy.com/https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
 
 codeformer = None
 
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 547e1b4c..988d7575 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -19,7 +19,7 @@ class DeepDanbooru:
 
         files = modelloader.load_models(
             model_path=os.path.join(paths.models_path, "torch_deepdanbooru"),
-            model_url='https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt',
+            model_url='https://ghproxy.com/https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt',
             ext_filter=[".pt"],
             download_name='model-resnet_custom_v3.pt',
         )
diff --git a/modules/deepbooru_model.py b/modules/deepbooru_model.py
index 83d2ff09..9a52c0cf 100644
--- a/modules/deepbooru_model.py
+++ b/modules/deepbooru_model.py
@@ -4,7 +4,7 @@ import torch.nn.functional as F
 
 from modules import devices
 
-# see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more
+# see https://ghproxy.com/https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more
 
 
 class DeepDanbooruModel(nn.Module):
diff --git a/modules/devices.py b/modules/devices.py
index 57e51da3..3a763590 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -63,7 +63,7 @@ def enable_tf32():
     if torch.cuda.is_available():
 
         # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
-        # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
+        # see https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
         if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
             torch.backends.cudnn.benchmark = True
 
diff --git a/modules/errors.py b/modules/errors.py
index 5271a9fe..6fb2782f 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -60,7 +60,7 @@ def display(e: Exception, task, *, full_traceback=False):
     if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message:
         print_error_explanation("""
 The most likely cause of this is you are trying to load Stable Diffusion 2.0 model without specifying its config file.
-See https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20 for how to solve this.
+See https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20 for how to solve this.
         """)
 
 
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 02a1727d..ce4e91d4 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -11,7 +11,7 @@ from modules.upscaler import Upscaler, UpscalerData
 
 
 def mod2normal(state_dict):
-    # this code is copied from https://github.com/victorca25/iNNfer
+    # this code is copied from https://ghproxy.com/https://github.com/victorca25/iNNfer
     if 'conv_first.weight' in state_dict:
         crt_net = {}
         items = list(state_dict)
@@ -44,7 +44,7 @@ def mod2normal(state_dict):
 
 
 def resrgan2normal(state_dict, nb=23):
-    # this code is copied from https://github.com/victorca25/iNNfer
+    # this code is copied from https://ghproxy.com/https://github.com/victorca25/iNNfer
     if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
         re8x = 0
         crt_net = {}
@@ -72,7 +72,7 @@ def resrgan2normal(state_dict, nb=23):
         crt_net['model.6.bias'] = state_dict['conv_up2.bias']
 
         if 'conv_up3.weight' in state_dict:
-            # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py
+            # modification supporting: https://ghproxy.com/https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py
             re8x = 3
             crt_net['model.9.weight'] = state_dict['conv_up3.weight']
             crt_net['model.9.bias'] = state_dict['conv_up3.bias']
@@ -87,7 +87,7 @@ def resrgan2normal(state_dict, nb=23):
 
 
 def infer_params(state_dict):
-    # this code is copied from https://github.com/victorca25/iNNfer
+    # this code is copied from https://ghproxy.com/https://github.com/victorca25/iNNfer
     scale2x = 0
     scalemin = 6
     n_uplayer = 0
@@ -121,7 +121,7 @@ def infer_params(state_dict):
 class UpscalerESRGAN(Upscaler):
     def __init__(self, dirname):
         self.name = "ESRGAN"
-        self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/ESRGAN.pth"
+        self.model_url = "https://ghproxy.com/https://github.com/cszn/KAIR/releases/download/v1.0/ESRGAN.pth"
         self.model_name = "ESRGAN_4x"
         self.scalers = []
         self.user_path = dirname
diff --git a/modules/esrgan_model_arch.py b/modules/esrgan_model_arch.py
index 2b9888ba..cdf8eed4 100644
--- a/modules/esrgan_model_arch.py
+++ b/modules/esrgan_model_arch.py
@@ -1,4 +1,4 @@
-# this file is adapted from https://github.com/victorca25/iNNfer
+# this file is adapted from https://ghproxy.com/https://github.com/victorca25/iNNfer
 
 from collections import OrderedDict
 import math
@@ -182,7 +182,7 @@ def conv1x1(in_planes, out_planes, stride=1):
 
 class SRVGGNetCompact(nn.Module):
     """A compact VGG-style network structure for super-resolution.
-    This class is copied from https://github.com/xinntao/Real-ESRGAN
+    This class is copied from https://ghproxy.com/https://github.com/xinntao/Real-ESRGAN
     """
 
     def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index 8e0f13bd..ba8d8f42 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -9,7 +9,7 @@ from modules import paths, shared, devices, modelloader, errors
 model_dir = "GFPGAN"
 user_path = None
 model_path = os.path.join(paths.models_path, model_dir)
-model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
+model_url = "https://ghproxy.com/https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
 have_gfpgan = False
 loaded_gfpgan_model = None
 
diff --git a/modules/hashes.py b/modules/hashes.py
index b7a33b42..48c87e98 100644
--- a/modules/hashes.py
+++ b/modules/hashes.py
@@ -64,7 +64,7 @@ def sha256(filename, title, use_addnet_hash=False):
 
 
 def addnet_hash_safetensors(b):
-    """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py"""
+    """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py"""
     hash_sha256 = hashlib.sha256()
     blksize = 1024 * 1024
 
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index e1c9cfbe..ac93bd69 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -53,7 +53,7 @@ and delete current Python and "venv" folder in WebUI's directory.
 
 You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/
 
-{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
+{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
 
 Use --skip-python-version-check to suppress this warning.
 """)
@@ -274,15 +274,15 @@ def prepare_environment():
     requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
 
     xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
-    gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip")
-    clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
-    openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
+    gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://ghproxy.com/https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip")
+    clip_package = os.environ.get('CLIP_PACKAGE', "https://ghproxy.com/https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
+    openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://ghproxy.com/https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
 
-    stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
-    stable_diffusion_xl_repo = os.environ.get('STABLE_DIFFUSION_XL_REPO', "https://github.com/Stability-AI/generative-models.git")
-    k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
-    codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
-    blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
+    stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://ghproxy.com/https://github.com/Stability-AI/stablediffusion.git")
+    stable_diffusion_xl_repo = os.environ.get('STABLE_DIFFUSION_XL_REPO', "https://ghproxy.com/https://github.com/Stability-AI/generative-models.git")
+    k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://ghproxy.com/https://github.com/crowsonkb/k-diffusion.git')
+    codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://ghproxy.com/https://github.com/sczhou/CodeFormer.git')
+    blip_repo = os.environ.get('BLIP_REPO', 'https://ghproxy.com/https://github.com/salesforce/BLIP.git')
 
     stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
     stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "5c10deee76adad0032b412294130090932317a87")
@@ -331,7 +331,7 @@ def prepare_environment():
                 run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
             else:
                 print("Installation of xformers is not supported in this version of Python.")
-                print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
+                print("You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
                 if not is_installed("xformers"):
                     exit(0)
         elif platform.system() == "Linux":
diff --git a/modules/mac_specific.py b/modules/mac_specific.py
index 9ceb43ba..f5e16af3 100644
--- a/modules/mac_specific.py
+++ b/modules/mac_specific.py
@@ -11,7 +11,7 @@ log = logging.getLogger(__name__)
 # before torch version 1.13, has_mps is only available in nightly pytorch and macOS 12.3+,
 # use check `getattr` and try it for compatibility.
 # in torch version 1.13, backends.mps.is_available() and backends.mps.is_built() are introduced in to check mps availabilty,
-# since torch 2.0.1+ nightly build, getattr(torch, 'has_mps', False) was deprecated, see https://github.com/pytorch/pytorch/pull/103279
+# since torch 2.0.1+ nightly build, getattr(torch, 'has_mps', False) was deprecated, see https://github.com/pytorch/pytorch/pull/103279
 def check_for_mps() -> bool:
     if version.parse(torch.__version__) <= version.parse("2.0.1"):
         if not getattr(torch, 'has_mps', False):
@@ -40,7 +40,7 @@ def torch_mps_gc() -> None:
         log.warning("MPS garbage collection failed", exc_info=True)
 
 
-# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
+# MPS workaround for https://github.com/pytorch/pytorch/issues/89784
 def cumsum_fix(input, cumsum_func, *args, **kwargs):
     if input.device.type == 'mps':
         output_dtype = kwargs.get('dtype', input.dtype)
@@ -56,19 +56,19 @@ if has_mps:
     CondFunc('torchsde._brownian.brownian_interval._randn', lambda _, size, dtype, device, seed: torch.randn(size, dtype=dtype, device=torch.device("cpu"), generator=torch.Generator(torch.device("cpu")).manual_seed(int(seed))).to(device), lambda _, size, dtype, device, seed: device.type == 'mps')
 
     if platform.mac_ver()[0].startswith("13.2."):
-        # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
         CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)
 
     if version.parse(torch.__version__) < version.parse("1.13"):
         # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
 
-        # MPS workaround for https://github.com/pytorch/pytorch/issues/79383
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/79383
         CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs),
                                                           lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps'))
-        # MPS workaround for https://github.com/pytorch/pytorch/issues/80800
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/80800
         CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs),
                                                                                         lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps')
-        # MPS workaround for https://github.com/pytorch/pytorch/issues/90532
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/90532
         CondFunc('torch.Tensor.numpy', lambda orig_func, self, *args, **kwargs: orig_func(self.detach(), *args, **kwargs), lambda _, self, *args, **kwargs: self.requires_grad)
     elif version.parse(torch.__version__) > version.parse("1.13.1"):
         cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
@@ -77,10 +77,10 @@ if has_mps:
         CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None)
         CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None)
 
-        # MPS workaround for https://github.com/pytorch/pytorch/issues/96113
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/96113
         CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps')
 
-        # MPS workaround for https://github.com/pytorch/pytorch/issues/92311
+        # MPS workaround for https://github.com/pytorch/pytorch/issues/92311
         if platform.processor() == 'i386':
             for funcName in ['torch.argmax', 'torch.Tensor.argmax']:
                 CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps')
diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py
index b892d5fc..77181124 100644
--- a/modules/models/diffusion/ddpm_edit.py
+++ b/modules/models/diffusion/ddpm_edit.py
@@ -1,12 +1,12 @@
 """
 wild mixture of
-https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
-https://github.com/CompVis/taming-transformers
+https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
+https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
+https://github.com/CompVis/taming-transformers
 -- merci
 """
 
-# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
+# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
 # See more details in LICENSE.
 
 import torch
diff --git a/modules/ngrok.py b/modules/ngrok.py
index 0c713e27..bc6359f3 100644
--- a/modules/ngrok.py
+++ b/modules/ngrok.py
@@ -11,7 +11,7 @@ def connect(token, port, options):
             token, username, password = token.split(':', 2)
             account = f"{username}:{password}"
 
-    # For all options see: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py
+    # For all options see: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py
     if not options.get('authtoken_from_env'):
         options['authtoken'] = token
     if account:
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index 0700b853..f5da14dc 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -86,42 +86,42 @@ def get_realesrgan_models(scaler):
         models = [
             UpscalerData(
                 name="R-ESRGAN General 4xV3",
-                path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
+                path="https://ghproxy.com/https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
                 scale=4,
                 upscaler=scaler,
                 model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
             ),
             UpscalerData(
                 name="R-ESRGAN General WDN 4xV3",
-                path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
+                path="https://ghproxy.com/https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
                 scale=4,
                 upscaler=scaler,
                 model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
             ),
             UpscalerData(
                 name="R-ESRGAN AnimeVideo",
-                path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
+                path="https://ghproxy.com/https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
                 scale=4,
                 upscaler=scaler,
                 model=lambda: SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
             ),
             UpscalerData(
                 name="R-ESRGAN 4x+",
-                path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
+                path="https://ghproxy.com/https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
                 scale=4,
                 upscaler=scaler,
                 model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
             ),
             UpscalerData(
                 name="R-ESRGAN 4x+ Anime6B",
-                path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
+                path="https://ghproxy.com/https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
                 scale=4,
                 upscaler=scaler,
                 model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
             ),
             UpscalerData(
                 name="R-ESRGAN 2x+",
-                path="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
+                path="https://ghproxy.com/https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
                 scale=2,
                 upscaler=scaler,
                 model=lambda: RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index b5f85ba5..3c38f829 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -172,7 +172,7 @@ def get_available_vram():
         return psutil.virtual_memory().available
 
 
-# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
+# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
 def split_cross_attention_forward_v1(self, x, context=None, mask=None, **kwargs):
     h = self.heads
 
@@ -213,7 +213,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None, **kwargs)
     return self.to_out(r2)
 
 
-# taken from https://github.com/Doggettx/stable-diffusion and modified
+# taken from https://github.com/Doggettx/stable-diffusion and modified
 def split_cross_attention_forward(self, x, context=None, mask=None, **kwargs):
     h = self.heads
 
@@ -277,7 +277,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None, **kwargs):
     return self.to_out(r2)
 
 
-# -- Taken from https://github.com/invoke-ai/InvokeAI and modified --
+# -- Taken from https://github.com/invoke-ai/InvokeAI and modified --
 mem_total_gb = psutil.virtual_memory().total // (1 << 30)
 
 
@@ -378,10 +378,10 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None, **k
     r = r.to(dtype)
     return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h))
 
-# -- End of code from https://github.com/invoke-ai/InvokeAI --
+# -- End of code from https://github.com/invoke-ai/InvokeAI --
 
 
-# Based on Birch-san's modified implementation of sub-quadratic attention from https://github.com/Birch-san/diffusers/pull/1
+# Based on Birch-san's modified implementation of sub-quadratic attention from https://github.com/Birch-san/diffusers/pull/1
 # The sub_quad_attention_forward function is under the MIT License listed under Memory Efficient Attention in the Licenses section of the web UI interface
 def sub_quad_attention_forward(self, x, context=None, mask=None, **kwargs):
     assert mask is None, "attention-mask not currently implemented for SubQuadraticCrossAttnProcessor."
@@ -494,7 +494,7 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):
     return self.to_out(out)
 
 
-# Based on Diffusers usage of scaled dot product attention from https://github.com/huggingface/diffusers/blob/c7da8fd23359a22d0df2741688b5b4f33c26df21/src/diffusers/models/cross_attention.py
+# Based on Diffusers usage of scaled dot product attention from https://github.com/huggingface/diffusers/blob/c7da8fd23359a22d0df2741688b5b4f33c26df21/src/diffusers/models/cross_attention.py
 # The scaled_dot_product_attention_forward function contains parts of code under Apache-2.0 license listed under Scaled Dot Product Attention in the Licenses section of the web UI interface
 def scaled_dot_product_attention_forward(self, x, context=None, mask=None, **kwargs):
     batch_size, sequence_length, inner_dim = x.shape
diff --git a/modules/sd_models.py b/modules/sd_models.py
index fb31a793..4603eb97 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -371,10 +371,10 @@ def enable_midas_autodownload():
         midas.api.ISL_PATHS[k] = os.path.join(midas_path, file_name)
 
     midas_urls = {
-        "dpt_large": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
-        "dpt_hybrid": "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_hybrid-midas-501f0c75.pt",
-        "midas_v21": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21-f6b98070.pt",
-        "midas_v21_small": "https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21_small-70d6b9c8.pt",
+        "dpt_large": "https://ghproxy.com/https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
+        "dpt_hybrid": "https://ghproxy.com/https://github.com/intel-isl/DPT/releases/download/1_0/dpt_hybrid-midas-501f0c75.pt",
+        "midas_v21": "https://ghproxy.com/https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21-f6b98070.pt",
+        "midas_v21_small": "https://ghproxy.com/https://github.com/AlexeyAB/MiDaS/releases/download/midas_dpt/midas_v21_small-70d6b9c8.pt",
     }
 
     midas.api.load_model_inner = midas.api.load_model
diff --git a/modules/sd_vae_approx.py b/modules/sd_vae_approx.py
index 86bd658a..5896875a 100644
--- a/modules/sd_vae_approx.py
+++ b/modules/sd_vae_approx.py
@@ -50,7 +50,7 @@ def model():
 
         if not os.path.exists(model_path):
             model_path = os.path.join(paths.models_path, "VAE-approx", model_name)
-            download_model(model_path, 'https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/download/v1.0.0-pre/' + model_name)
+            download_model(model_path, 'https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/download/v1.0.0-pre/' + model_name)
 
         loaded_model = VAEApprox()
         loaded_model.load_state_dict(torch.load(model_path, map_location='cpu' if devices.device.type != 'cuda' else None))
diff --git a/modules/sd_vae_taesd.py b/modules/sd_vae_taesd.py
index 5bf7c76e..f3b35e15 100644
--- a/modules/sd_vae_taesd.py
+++ b/modules/sd_vae_taesd.py
@@ -2,7 +2,7 @@
 Tiny AutoEncoder for Stable Diffusion
 (DNN for encoding / decoding SD's latent space)
 
-https://github.com/madebyollin/taesd
+https://github.com/madebyollin/taesd
 """
 import os
 import torch
@@ -75,7 +75,7 @@ def model():
 
     if loaded_model is None:
         model_path = os.path.join(paths_internal.models_path, "VAE-taesd", model_name)
-        download_model(model_path, 'https://github.com/madebyollin/taesd/raw/main/' + model_name)
+        download_model(model_path, 'https://ghproxy.com/https://github.com/madebyollin/taesd/raw/main/' + model_name)
 
         if os.path.exists(model_path):
             loaded_model = TAESD(model_path)
diff --git a/modules/shared.py b/modules/shared.py
index aa72c9c8..08020724 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -312,7 +312,7 @@ options_templates = {}
 options_templates.update(options_section(('saving-images', "Saving images/grids"), {
     "samples_save": OptionInfo(True, "Always save all generated images"),
     "samples_format": OptionInfo('png', 'File format for images'),
-    "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+    "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
     "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),
 
     "grid_save": OptionInfo(True, "Always save all generated image grids"),
@@ -320,7 +320,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
     "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
     "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
     "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
-    "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+    "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
     "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
     "font": OptionInfo("", "Font for image grids that have text"),
     "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}),
@@ -367,7 +367,7 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo
     "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
     "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
     "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
-    "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
+    "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
     "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
 }))
 
@@ -425,7 +425,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
     "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
     "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
     "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
-    "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
+    "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
     "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
     "auto_vae_precision": OptionInfo(True, "Automaticlly revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
     "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"),
@@ -440,8 +440,8 @@ options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
 
 options_templates.update(options_section(('optimizations', "Optimizations"), {
     "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
-    "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
-    "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
+    "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
+    "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
     "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
     "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
     "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
@@ -556,7 +556,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
     'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise schedule"),
     'rho':  OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a more steep noise schedule (decreases faster)"),
     'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
-    'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
+    'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
     'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}),
     'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}),
     'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}).info("must be < sampling steps"),
diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py
index 497568eb..c3e32879 100644
--- a/modules/sub_quadratic_attention.py
+++ b/modules/sub_quadratic_attention.py
@@ -1,5 +1,5 @@
 # original source:
-#   https://github.com/AminRezaei0x443/memory-efficient-attention/blob/1bc0d9e6ac5f82ea43a375135c4e1d3896ee1694/memory_efficient_attention/attention_torch.py
+#   https://github.com/AminRezaei0x443/memory-efficient-attention/blob/1bc0d9e6ac5f82ea43a375135c4e1d3896ee1694/memory_efficient_attention/attention_torch.py
 # license:
 #   MIT License (see Memory Efficient Attention under the Licenses section in the web UI interface for the full license)
 # credit:
diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py
index 1675e39a..16cd60d3 100644
--- a/modules/textual_inversion/autocrop.py
+++ b/modules/textual_inversion/autocrop.py
@@ -295,7 +295,7 @@ def is_square(w, h):
 
 
 def download_and_cache_models(dirname):
-    download_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
+    download_url = 'https://ghproxy.com/https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
     model_file_name = 'face_detection_yunet.onnx'
 
     os.makedirs(dirname, exist_ok=True)
diff --git a/modules/ui.py b/modules/ui.py
index 07ecee7b..c8121efe 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1138,7 +1138,7 @@ def create_ui():
 
     with gr.Blocks(analytics_enabled=False) as train_interface:
         with gr.Row().style(equal_height=False):
-            gr.HTML(value="

See wiki for detailed explanation.

") + gr.HTML(value="

See wiki for detailed explanation.

") with gr.Row(variant="compact").style(equal_height=False): with gr.Tabs(elem_id="train_tabs"): @@ -1243,7 +1243,7 @@ def create_ui(): return sorted(textual_inversion.textual_inversion_templates) with gr.Tab(label="Train", id="train"): - gr.HTML(value="

Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]

") + gr.HTML(value="

Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]

") with FormRow(): train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") @@ -1507,7 +1507,7 @@ def create_ui(): gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) footer = shared.html("footer.html") - footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") + footer = footer.format(versions=versions_html(), api_docs="/docs" if shared.cmd_opts.api else "https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API") gr.HTML(footer, elem_id="footer") settings.add_functionality(demo) @@ -1577,7 +1577,7 @@ def versions_html(): xformers_version = "N/A" return f""" -version: {tag} +version: {tag}  •  python: {python_version}  •  diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index f3e4fba7..a3b7a01e 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -124,7 +124,7 @@ def check_updates(id_task, disable_list): def make_commit_link(commit_hash, remote, text=None): if text is None: text = commit_hash[:8] - if remote.startswith("https://github.com/"): + if remote.startswith("https://ghproxy.com/https://github.com/"): if remote.endswith(".git"): remote = remote[:-4] href = remote + "/commit/" + commit_hash diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index 1e833fa8..c4dff840 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -62,7 +62,7 @@ def find_noise_for_image(p, cond, uncond, cfg_scale, steps): Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", 
"original_prompt", "original_negative_prompt", "sigma_adjustment"]) -# Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736 +# Based on changes suggested by briansemrau in https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736 def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps): x = p.init_latent diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index c98ab480..b4f0f77a 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -12,7 +12,7 @@ from modules.processing import Processed, process_images from modules.shared import opts, state -# this function is taken from https://github.com/parlance-zz/g-diffuser-bot +# this function is taken from https://ghproxy.com/https://github.com/parlance-zz/g-diffuser-bot def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.05): # helper fft routines that keep ortho normalization and auto-shift before and after fft def _fft2(data): diff --git a/webui-macos-env.sh b/webui-macos-env.sh index 6354e73b..245b1269 100644 --- a/webui-macos-env.sh +++ b/webui-macos-env.sh @@ -12,7 +12,7 @@ fi export install_dir="$HOME" export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" export TORCH_COMMAND="pip install torch==2.0.1 torchvision==0.15.2" -export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git" +export K_DIFFUSION_REPO="https://ghproxy.com/https://github.com/brkirch/k-diffusion.git" export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71" export PYTORCH_ENABLE_MPS_FALLBACK=1 diff --git a/webui.sh b/webui.sh index cb8b9d14..474d9067 100755 --- a/webui.sh +++ b/webui.sh @@ -187,7 +187,7 @@ else printf "\n%s\n" "${delimiter}" printf "Clone stable-diffusion-webui" printf "\n%s\n" "${delimiter}" - "${GIT}" clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git "${clone_dir}" 
+ "${GIT}" clone https://ghproxy.com/https://github.com/AUTOMATIC1111/stable-diffusion-webui.git "${clone_dir}" cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; } fi