Merge latest kohya_ss code update

bmaltais 2023-01-01 13:10:32 -05:00
parent 2c069d1067
commit ee2499d834
5 changed files with 1760 additions and 2592 deletions

View File

@@ -4,7 +4,7 @@ extract factors the build is dependent on:
     [ ] TODO: Q - What if we have multiple GPUs of different makes?
 - CUDA version
 - Software:
-    - CPU-only: only CPU quantization functions (no optimizer, no matrix multipl)
+    - CPU-only: only CPU quantization functions (no optimizer, no matrix multiple)
     - CuBLAS-LT: full-build 8-bit optimizer
     - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
@@ -44,7 +44,7 @@ def get_cuda_version(cuda, cudart_path):
     minor = (version-(major*1000))//10
     if major < 11:
-        print('CUDA SETUP: CUDA version lower than 11 are currenlty not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
+        print('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
     return f'{major}{minor}'
@@ -163,4 +163,4 @@ def evaluate_cuda_setup():
     binary_name = get_binary_name()
-    return binary_name
+    return binary_name
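For reference, the get_cuda_version hunk above splits the single integer reported by the CUDA runtime into a major/minor pair. Below is a minimal sketch of that arithmetic, assuming the usual encoding of 1000*major + 10*minor (e.g. 11070 for CUDA 11.7); split_cuda_version is a hypothetical stand-in, not a function in the library.

    def split_cuda_version(version: int) -> str:
        # e.g. 11070 -> major 11, minor 7 -> '117'
        major = version // 1000
        minor = (version - (major * 1000)) // 10
        if major < 11:
            # mirrors the warning printed in the diff above: pre-11 runtimes only
            # get 8-bit optimizers and quantization, not LLM.int8()
            print('CUDA SETUP: CUDA versions below 11 are not supported for LLM.int8().')
        return f'{major}{minor}'

    assert split_cuda_version(11070) == '117'
    assert split_cuda_version(10020) == '102'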

File diff suppressed because it is too large

View File

@@ -2486,9 +2486,9 @@ if __name__ == '__main__':
   parser.add_argument("--bf16", action='store_true', help='use bfloat16 / bfloat16を指定し省メモリ化する')
   parser.add_argument("--xformers", action='store_true', help='use xformers / xformersを使用し高速化する')
   parser.add_argument("--diffusers_xformers", action='store_true',
-                      help='use xformers by diffusers (Hypernetworks doen\'t work) / Diffusersでxformersを使用する（Hypernetwork利用不可）')
+                      help='use xformers by diffusers (Hypernetworks doesn\'t work) / Diffusersでxformersを使用する（Hypernetwork利用不可）')
   parser.add_argument("--opt_channels_last", action='store_true',
-                      help='set channels last option to model / モデルにchannles lastを指定し最適化する')
+                      help='set channels last option to model / モデルにchannels lastを指定し最適化する')
   parser.add_argument("--network_module", type=str, default=None, help='Hypernetwork module to use / Hypernetworkを使う時そのモジュール名')
   parser.add_argument("--network_weights", type=str, default=None, help='Hypernetwork weights to load / Hypernetworkの重み')
   parser.add_argument("--network_mul", type=float, default=1.0, help='Hypernetwork multiplier / Hypernetworkの効果の倍率')
@@ -2514,4 +2514,4 @@ if __name__ == '__main__':
                        help="save 1st stage images for highres fix / highres fixの最初のステージの画像を保存する")
   args = parser.parse_args()
-  main(args)
+  main(args)
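The two argparse hunks above only correct help text, so parsing behaviour is unchanged. For readers unfamiliar with store_true flags, here is a stripped-down, hypothetical sketch of how such options combine at parse time; the parser below is not the script's full argument list, and "some.module" is a placeholder value.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--xformers", action='store_true')           # boolean switch, default False
    parser.add_argument("--diffusers_xformers", action='store_true')
    parser.add_argument("--opt_channels_last", action='store_true')
    parser.add_argument("--network_module", type=str, default=None)
    parser.add_argument("--network_mul", type=float, default=1.0)

    args = parser.parse_args(["--xformers", "--network_module", "some.module"])
    assert args.xformers is True and args.diffusers_xformers is False
    assert args.network_module == "some.module" and args.network_mul == 1.0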

File diff suppressed because it is too large

View File

@@ -925,11 +925,12 @@ def train(args):
     print(f"update token length: {args.max_token_length}")
   # 学習データを用意する
   assert args.resolution is not None, f"resolution is required / resolution(解像度)を指定してください"
   resolution = tuple([int(r) for r in args.resolution.split(',')])
   if len(resolution) == 1:
     resolution = (resolution[0], resolution[0])
   assert len(resolution) == 2, \
-      f"resolution must be 'size' or 'width,height' / resolution'サイズ'または'幅','高さ'で指定してください: {args.resolution}"
+      f"resolution must be 'size' or 'width,height' / resolution(解像度)'サイズ'または'幅','高さ'で指定してください: {args.resolution}"
   if args.face_crop_aug_range is not None:
     face_crop_aug_range = tuple([float(r) for r in args.face_crop_aug_range.split(',')])
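A short sketch of how the resolution handling above behaves: a single value is treated as a square size, while "width,height" gives a rectangular size. parse_resolution is a hypothetical helper lifted from the diff for illustration, not a function defined in the script.

    def parse_resolution(value: str) -> tuple:
        # "--resolution 512" and "--resolution 512,768" are both accepted
        resolution = tuple([int(r) for r in value.split(',')])
        if len(resolution) == 1:
            resolution = (resolution[0], resolution[0])  # square image, e.g. (512, 512)
        assert len(resolution) == 2, "resolution must be 'size' or 'width,height'"
        return resolution

    assert parse_resolution("512") == (512, 512)
    assert parse_resolution("512,768") == (512, 768)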
@@ -1373,9 +1374,9 @@ if __name__ == '__main__':
                       help="keep heading N tokens when shuffling caption tokens / captionのシャッフル時に、先頭からこの個数のトークンをシャッフルしないで残す")
   parser.add_argument("--train_data_dir", type=str, default=None, help="directory for train images / 学習画像データのディレクトリ")
   parser.add_argument("--reg_data_dir", type=str, default=None, help="directory for regularization images / 正則化画像データのディレクトリ")
-  parser.add_argument("--in_json", type=str, default=None, help="json meatadata for dataset / データセットのmetadataのjsonファイル")
+  parser.add_argument("--in_json", type=str, default=None, help="json metadata for dataset / データセットのmetadataのjsonファイル")
   parser.add_argument("--caption_extension", type=str, default=".caption", help="extension of caption files / 読み込むcaptionファイルの拡張子")
-  parser.add_argument("--dataset_repeats", type=int, default=None,
+  parser.add_argument("--dataset_repeats", type=int, default=1,
                       help="repeat dataset when training with captions / キャプションでの学習時にデータセットを繰り返す回数")
   parser.add_argument("--output_dir", type=str, default=None,
                       help="directory to output trained model / 学習後のモデル出力先ディレクトリ")