From 3f55d9a932145133e63000331fac1e180aed41f6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 6 Sep 2022 10:58:58 +0300 Subject: [PATCH] collab link --- README.md | 8 +++++++- modules/shared.py | 6 +----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 4214834e..9a59d1e5 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ A browser interface based on Gradio library for Stable Diffusion. ## Feature showcase -[Detailed feature showcase with images, art by Greg Rutkowski](https://github.com/AUTOMATIC1111/stable-diffusion-webui-feature-showcase) +[Detailed feature showcase with images, art by Greg Rutkowski](https://github.com/AUTOMATIC1111/stable-diffusion-webui-feature-showcase) - Original txt2img and img2img modes - One click install and run script (but you still must install python, git and CUDA) @@ -72,6 +72,12 @@ as model if it has .pth extension. Grab models from the [Model Database](https:/ - webui.bat installs requirements from files `requirements_versions.txt`, which lists versions for modules specifically compatible with Python 3.10.6. If you choose to install for a different version of python, editing `webui.bat` to have `set REQS_FILE=requirements.txt` instead of `set REQS_FILE=requirements_versions.txt` may help (but I still reccomend you to just use the recommended version of python). - if you feel you broke something and want to reinstall from scratch, delete directories: `venv`, `repositories`. 
+## Google Colab + +If you can't or don't want to run the webui locally, here is a Google Colab notebook that allows you to run it: + +https://colab.research.google.com/drive/1Iy-xW9t1-OQWhb0hNxueGij8phCyluOh + ### Manual instructions Alternatively, if you don't want to run webui.bat, here are instructions for installing everything by hand: diff --git a/modules/shared.py b/modules/shared.py index c86cf4a6..d57aba37 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -32,11 +32,7 @@ parser.add_argument("--share", action='store_true', help="use share=True for gra parser.add_argument("--esrgan-models-path", type=str, help="path to directory with ESRGAN models", default=os.path.join(script_path, 'ESRGAN')) parser.add_argument("--opt-split-attention", action='store_true', help="enable optimization that reduced vram usage by a lot for about 10% decrease in performance") parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") - -if 'notebook_argv' in globals(): - cmd_opts = parser.parse_args(notebook_argv) -else: - cmd_opts = parser.parse_args() +cmd_opts = parser.parse_args() cpu = torch.device("cpu") gpu = torch.device("cuda")