bmaltais 2023-03-06 12:46:57 -05:00
parent 414a98d100
commit fccb1c3359
2 changed files with 50 additions and 2 deletions


@@ -180,6 +180,7 @@ This will store a backup file with your current locally installed pip packa
- Add a 'replace underscore with space' option to WD14 captioning. Thanks @sALTaccount!
- Improve how the custom preset is set and handled.
- Add support for the `--listen` argument. This allows gradio to listen for connections from other devices on the network (or the internet). For example: `gui.ps1 --listen "0.0.0.0"` will allow anyone to connect to the gradio webui (a sketch of how such a value typically reaches gradio follows this hunk).
- Updated the `Resize LoRA` tab to support LoCon resizing. Added new dynamic resizing options (dynamic method, dynamic parameter, verbose).
* 2023/03/05 (v21.1.4):
- Removed the legacy and confusing 'use 8bit adam' checkbox. It is now configured through the Optimizer drop-down list and will be set properly based on legacy config files.
* 2023/03/04 (v21.1.3):
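
As a rough illustration of the `--listen` entry above (a minimal sketch only, not the repository's actual launch code; `parser`, `args`, and `interface` are hypothetical names), such a flag is typically forwarded to gradio's `launch(server_name=...)`:

import argparse

import gradio as gr

# Hypothetical plumbing: parse --listen and hand its value to gradio.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--listen', type=str, default='127.0.0.1',
    help='IP address gradio should bind to; 0.0.0.0 accepts remote connections',
)
args = parser.parse_args()

with gr.Blocks() as interface:
    gr.Markdown('Resize LoRA')

# server_name controls the bind address; 0.0.0.0 exposes the UI to the network.
interface.launch(server_name=args.listen)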


@@ -17,6 +17,9 @@ def resize_lora(
    save_to,
    save_precision,
    device,
    dynamic_method,
    dynamic_param,
    verbose,
):
    # Check that a model file was provided
    if model == '':
@@ -27,16 +30,35 @@ def resize_lora(
    if not os.path.isfile(model):
        msgbox('The provided model is not a file')
        return

    if dynamic_method == 'sv_ratio':
        if float(dynamic_param) < 2:
            msgbox(f'Dynamic parameter for {dynamic_method} needs to be 2 or greater...')
            return

    if dynamic_method == 'sv_fro' or dynamic_method == 'sv_cumulative':
        if float(dynamic_param) < 0 or float(dynamic_param) > 1:
            msgbox(f'Dynamic parameter for {dynamic_method} needs to be between 0 and 1...')
            return
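
    # Rough meaning of the dynamic methods (assumed semantics of resize_lora.py's
    # dynamic resizing, not stated in this commit): sv_ratio keeps singular values
    # within the given ratio of the largest one, sv_fro keeps enough of them to
    # retain the given fraction of the Frobenius norm, and sv_cumulative keeps the
    # given cumulative fraction of the singular values.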

    # Check if save_to ends with one of the defined extensions. If not, add .safetensors.
    if not save_to.endswith(('.pt', '.safetensors')):
        save_to += '.safetensors'

    if device == '':
        device = 'cuda'

    run_cmd = f'{PYTHON} "{os.path.join("networks","resize_lora.py")}"'
    run_cmd = f'{PYTHON} "{os.path.join("tools","resize_lora.py")}"'
    run_cmd += f' --save_precision {save_precision}'
    run_cmd += f' --save_to {save_to}'
    run_cmd += f' --model {model}'
    run_cmd += f' --new_rank {new_rank}'
    run_cmd += f' --device {device}'
    if not dynamic_method == 'None':
        run_cmd += f' --dynamic_method {dynamic_method}'
        run_cmd += f' --dynamic_param {dynamic_param}'
    if verbose:
        run_cmd += f' --verbose'
    print(run_cmd)
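
For orientation, with illustrative inputs (an sv_fro resize at 0.9, fp16 precision, verbose on; file names are hypothetical and `{PYTHON}` expands to whatever interpreter the GUI is configured with) the printed run_cmd would look roughly like:

python "tools/resize_lora.py" --save_precision fp16 --save_to my_lora_resized.safetensors --model my_lora.safetensors --new_rank 8 --device cuda --dynamic_method sv_fro --dynamic_param 0.9 --verbose
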
@@ -56,7 +78,7 @@ def gradio_resize_lora_tab():
    with gr.Tab('Resize LoRA'):
        gr.Markdown('This utility can resize a LoRA.')

        lora_ext = gr.Textbox(value='*.pt *.safetensors', visible=False)
        lora_ext = gr.Textbox(value='*.safetensors *.pt', visible=False)
        lora_ext_name = gr.Textbox(value='LoRA model types', visible=False)

        with gr.Row():
@@ -84,6 +106,27 @@ def gradio_resize_lora_tab():
                interactive=True,
            )

        with gr.Row():
            dynamic_method = gr.Dropdown(
                choices=['None',
                    'sv_ratio',
                    'sv_fro',
                    'sv_cumulative'
                ],
                value='sv_fro',
                label='Dynamic method',
                interactive=True
            )
            dynamic_param = gr.Textbox(
                label='Dynamic parameter',
                value='0.9',
                interactive=True,
                placeholder='Value for the dynamic method selected.'
            )
            verbose = gr.Checkbox(
                label='Verbose',
                value=False
            )
        with gr.Row():
            save_to = gr.Textbox(
                label='Save to',
@@ -109,6 +152,7 @@ def gradio_resize_lora_tab():
                label='Device',
                placeholder='(Optional) device to use, cuda for GPU. Default: cuda',
                interactive=True,
                value='cuda',
            )

        convert_button = gr.Button('Resize model')
@@ -121,6 +165,9 @@ def gradio_resize_lora_tab():
                save_to,
                save_precision,
                device,
                dynamic_method,
                dynamic_param,
                verbose,
            ],
            show_progress=False,
        )
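
The three new components are appended to the `inputs` list in the same order as the new parameters of `resize_lora`, because gradio passes component values to the callback positionally. A minimal, self-contained sketch of that contract (component set trimmed and names hypothetical; not the full tab from this commit):

import gradio as gr

def resize_lora(model, new_rank, save_to, save_precision, device,
                dynamic_method, dynamic_param, verbose):
    # Values arrive in the same order as the `inputs` list below.
    print(model, new_rank, save_to, save_precision, device,
          dynamic_method, dynamic_param, verbose)

with gr.Blocks() as demo:
    model = gr.Textbox(label='Source LoRA')
    new_rank = gr.Slider(minimum=1, maximum=1024, value=4, step=1, label='Desired LoRA rank')
    save_to = gr.Textbox(label='Save to')
    save_precision = gr.Dropdown(choices=['fp16', 'bf16', 'float'], value='fp16', label='Save precision')
    device = gr.Textbox(label='Device', value='cuda')
    dynamic_method = gr.Dropdown(choices=['None', 'sv_ratio', 'sv_fro', 'sv_cumulative'],
                                 value='sv_fro', label='Dynamic method')
    dynamic_param = gr.Textbox(label='Dynamic parameter', value='0.9')
    verbose = gr.Checkbox(label='Verbose', value=False)
    convert_button = gr.Button('Resize model')
    convert_button.click(
        resize_lora,
        inputs=[model, new_rank, save_to, save_precision, device,
                dynamic_method, dynamic_param, verbose],
        show_progress=False,
    )

demo.launch()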