Corrected typos in shared.py and README

Steve Eberhardt 2022-09-14 20:04:05 -07:00 committed by AUTOMATIC1111
parent 6a4db7b9a5
commit 4a626f6ea6
2 changed files with 8 additions and 8 deletions

README

@@ -112,7 +112,7 @@ You can do this for python, but not for git.
to enable appropriate optimization according to low VRAM guide below (for example, `set COMMANDLINE_ARGS=--medvram --opt-split-attention`).
- to prevent the creation of virtual environment and use your system python, use custom parameter replacing `set VENV_DIR=-` (see below).
- webui.bat installs requirements from files `requirements_versions.txt`, which lists versions for modules specifically compatible with
Python 3.10.6. If you choose to install for a different version of python, using custom parameter `set REQS_FILE=requirements.txt`
may help (but I still recommend you to just use the recommended version of python).
- if you feel you broke something and want to reinstall from scratch, delete directories: `venv`, `repositories`.
- if you get a green or black screen instead of generated pictures, you have a card that doesn't support half precision
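The context above describes configuring webui.bat through environment variables such as `COMMANDLINE_ARGS`, `VENV_DIR`, and `REQS_FILE`. As a rough illustration only (not the project's actual launcher code), a launcher could pick these settings up along the following lines:

```python
import os
import shlex
import sys

# Illustrative sketch: read launcher settings from the environment, the way the
# README's `set VARIABLE=value` examples suggest. The variable names mirror the
# README; the real webui.bat/launch logic may differ.
commandline_args = shlex.split(os.environ.get("COMMANDLINE_ARGS", ""))
reqs_file = os.environ.get("REQS_FILE", "requirements_versions.txt")
venv_dir = os.environ.get("VENV_DIR", "venv")  # "-" means: skip creating a venv

use_venv = venv_dir != "-"
print(f"extra args: {commandline_args}")
print(f"requirements file: {reqs_file}, use venv: {use_venv}")
sys.argv.extend(commandline_args)  # hand the extra flags to argparse later
```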
@@ -166,7 +166,7 @@ Here's a list of optimization arguments:
- If you have 4GB VRAM and want to make 512x512 (or maybe up to 640x640) images, use `--medvram`.
- If you have 4GB VRAM and want to make 512x512 images, but you get an out of memory error with `--medvram`, use `--medvram --opt-split-attention` instead.
- If you have 4GB VRAM and want to make 512x512 images, and you still get an out of memory error, use `--lowvram --always-batch-cond-uncond --opt-split-attention` instead.
- If you have 4GB VRAM and want to make images larger than you can with `--medvram`, use `--lowvram --opt-split-attention`.
- If you have more VRAM and want to make larger images than you can usually make (for example 1024x1024 instead of 512x512), use `--medvram --opt-split-attention`. You can use `--lowvram`
also but the effect will likely be barely noticeable.
- Otherwise, do not use any of those.
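The decision rules in the list above can be restated as a small helper; the function name and parameters below are hypothetical, and the logic is only a paraphrase of the guidance, not anything from the codebase:

```python
def suggest_vram_flags(vram_gb: int, large_images: bool = False, oom_seen: int = 0) -> str:
    """Hypothetical helper restating the VRAM guidance above.

    oom_seen: 0 = no out-of-memory errors yet, 1 = OOM with --medvram,
              2 = OOM even with --medvram --opt-split-attention.
    """
    if vram_gb <= 4:
        if oom_seen >= 2 and not large_images:
            return "--lowvram --always-batch-cond-uncond --opt-split-attention"
        if large_images:
            return "--lowvram --opt-split-attention"
        if oom_seen == 1:
            return "--medvram --opt-split-attention"
        return "--medvram"
    if large_images:
        # more VRAM, but aiming for e.g. 1024x1024 instead of 512x512
        return "--medvram --opt-split-attention"
    return ""  # otherwise, no memory flags needed


print(suggest_vram_flags(4))                     # --medvram
print(suggest_vram_flags(8, large_images=True))  # --medvram --opt-split-attention
```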
@@ -174,7 +174,7 @@ also but the effect will likely be barely noticeable.
### Running online
Use the `--share` option to run online. You will get a xxx.app.gradio link. This is the intended way to use the
-program in collabs. You may set up authentication for said gradio shared instance with the flag `--gradio-auth username:password`, optionally providing multiple sets of usernames and passwords separated by commas.
+program in Colab. You may set up authentication for said gradio shared instance with the flag `--gradio-auth username:password`, optionally providing multiple sets of usernames and passwords separated by commas.
Use `--listen` to make the server listen to network connections. This will allow computers on the local network
to access the UI, and if you configure port forwarding, also computers on the internet.
@@ -183,9 +183,9 @@ Use `--port xxxx` to make the server listen on a specific port, xxxx being the w
all ports below 1024 need root/admin rights, for this reason it is advised to use a port above 1024.
Defaults to port 7860 if available.
-### Google collab
-If you don't want or can't run locally, here is a Google colab that allows you to run the webui:
+### Google Colab
+If you don't want or can't run locally, here is a Google Colab that allows you to run the webui:
https://colab.research.google.com/drive/1Iy-xW9t1-OQWhb0hNxueGij8phCyluOh
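The `--share`, `--gradio-auth`, `--listen`, and `--port` options described in the two hunks above correspond to standard gradio launch settings. A minimal standalone sketch of that mapping (this is not the webui's own launch code, just an illustration using gradio's public API):

```python
import gradio as gr

# Minimal standalone demo showing how flags like --share, --listen, --port and
# --gradio-auth typically translate into gradio's launch() parameters.
demo = gr.Interface(fn=lambda text: text[::-1], inputs="text", outputs="text")

demo.launch(
    share=True,                       # like --share: get a public *.gradio.app link
    server_name="0.0.0.0",            # like --listen: accept LAN connections
    server_port=7860,                 # like --port xxxx: pick the listening port
    auth=[("username", "password")],  # like --gradio-auth username:password
)
```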

shared.py

@@ -23,7 +23,7 @@ parser.add_argument("--ckpt", type=str, default=os.path.join(sd_path, sd_model_f
parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default='GFPGANv1.3.pth')
parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
-parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware accleration in browser)")
+parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
@@ -142,8 +142,8 @@ class Options:
"interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
"interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
"interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
-"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum descripton length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
-"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum descripton length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
+"interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
+"interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
"interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
}
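Settings registered through `OptionInfo` entries like the ones above are typically read back as attributes of an options object. The following is a self-contained sketch of that access pattern under assumed class shapes; it is an illustration, not the project's actual `Options`/`OptionInfo` implementation:

```python
# Sketch of how a mapping of OptionInfo defaults can be exposed as attributes.
class OptionInfo:
    def __init__(self, default, label, component=None, component_args=None):
        self.default = default
        self.label = label
        self.component = component
        self.component_args = component_args or {}


class Options:
    data_labels = {
        "interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length"),
        "interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length"),
    }

    def __init__(self):
        # start from the registered defaults; user overrides would be merged in here
        self.data = {k: v.default for k, v in self.data_labels.items()}

    def __getattr__(self, item):
        if item in self.data_labels:
            return self.data[item]
        raise AttributeError(item)


opts = Options()
print(opts.interrogate_clip_max_length)  # 48
```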