
Commit 00e832e

fix typo

sdbds committed Apr 18, 2024
1 parent 073ee01 commit 00e832e
Showing 9 changed files with 23 additions and 23 deletions.
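The change is the same in every hunk: args.optimizer_type.lower() returns an all-lowercase string, so the old check endswith("scheduleFree") could never match. Every schedule-free branch was therefore unreachable, and lr_scheduler.step() ran even when a schedule-free optimizer was selected. A minimal sketch of the comparison (the optimizer name here is illustrative):

    optimizer_type = "AdamWScheduleFree"  # illustrative value

    # Old check: .lower() yields "adamwschedulefree", which can never
    # end with a mixed-case suffix, so this branch was never taken.
    print(optimizer_type.lower().endswith("scheduleFree"))  # False

    # Fixed check: an all-lowercase suffix can match.
    print(optimizer_type.lower().endswith("schedulefree"))  # True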
8 changes: 4 additions & 4 deletions fine_tune.py
@@ -255,7 +255,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
ds_model = deepspeed_utils.prepare_deepspeed_model(args, unet=unet, text_encoder=text_encoder)
else:
ds_model = deepspeed_utils.prepare_deepspeed_model(args, unet=unet)
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
ds_model, optimizer, train_dataloader = accelerator.prepare(
ds_model, optimizer, train_dataloader
)
@@ -267,7 +267,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
else:
# apparently accelerator handles this for us
if args.train_text_encoder:
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
unet, text_encoder, optimizer, train_dataloader = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader
)
@@ -276,7 +276,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
unet, optimizer, train_dataloader = accelerator.prepare(unet, optimizer, train_dataloader)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
@@ -405,7 +405,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

optimizer.step()
-if not args.optimizer_type.lower().endswith("scheduleFree"):
+if not args.optimizer_type.lower().endswith("schedulefree"):
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)

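For background, schedule-free optimizers fold the learning-rate schedule into the optimizer itself, which is why these branches neither prepare nor step an lr_scheduler. A minimal sketch of the intended stepping pattern, assuming Meta's schedulefree package and hypothetical model, compute_loss, and train_dataloader objects:

    import schedulefree

    optimizer = schedulefree.AdamWScheduleFree(model.parameters(), lr=1e-3)
    optimizer.train()  # schedule-free optimizers must be put in train mode

    for batch in train_dataloader:
        loss = compute_loss(model, batch)  # hypothetical loss helper
        loss.backward()
        optimizer.step()  # no lr_scheduler.step(); the schedule is internal
        optimizer.zero_grad(set_to_none=True)

    optimizer.eval()  # switch to the averaged weights before eval or saving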
6 changes: 3 additions & 3 deletions sdxl_train.py
@@ -415,7 +415,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
text_encoder2=text_encoder2 if train_text_encoder2 else None,
)
# most ZeRO stages use optimizer partitioning, so we have to prepare the optimizer and ds_model at the same time. # pull/1139#issuecomment-1986790007
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
ds_model, optimizer, train_dataloader = accelerator.prepare(
ds_model, optimizer, train_dataloader
)
@@ -433,7 +433,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
text_encoder1 = accelerator.prepare(text_encoder1)
if train_text_encoder2:
text_encoder2 = accelerator.prepare(text_encoder2)
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
optimizer, train_dataloader = accelerator.prepare(optimizer, train_dataloader)
else:
optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler)
@@ -636,7 +636,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

optimizer.step()
-if not args.optimizer_type.lower().endswith("scheduleFree"):
+if not args.optimizer_type.lower().endswith("schedulefree"):
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)

4 changes: 2 additions & 2 deletions sdxl_train_control_net_lllite.py
@@ -286,7 +286,7 @@ def train(args):
unet.to(weight_dtype)

# apparently accelerator handles this for us
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
unet, optimizer, train_dataloader = accelerator.prepare(unet, optimizer, train_dataloader)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
@@ -491,7 +491,7 @@ def remove_model(old_ckpt_name):
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

optimizer.step()
-if not args.optimizer_type.lower().endswith("scheduleFree"):
+if not args.optimizer_type.lower().endswith("schedulefree"):
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)

4 changes: 2 additions & 2 deletions sdxl_train_control_net_lllite_old.py
@@ -254,7 +254,7 @@ def train(args):
network.to(weight_dtype)

# apparently accelerator handles this for us
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
unet, network, optimizer, train_dataloader = accelerator.prepare(
unet, network, optimizer, train_dataloader
)
@@ -460,7 +460,7 @@ def remove_model(old_ckpt_name):
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

optimizer.step()
-if not args.optimizer_type.lower().endswith("scheduleFree"):
+if not args.optimizer_type.lower().endswith("schedulefree"):
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)

2 changes: 1 addition & 1 deletion train_controlnet.py
@@ -276,7 +276,7 @@ def train(args):
controlnet.to(weight_dtype)

# apparently accelerator handles this for us
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
controlnet, optimizer, train_dataloader = accelerator.prepare(
controlnet, optimizer, train_dataloader
)
8 changes: 4 additions & 4 deletions train_db.py
@@ -229,7 +229,7 @@ def train(args):
ds_model = deepspeed_utils.prepare_deepspeed_model(args, unet=unet, text_encoder=text_encoder)
else:
ds_model = deepspeed_utils.prepare_deepspeed_model(args, unet=unet)
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
ds_model, optimizer, train_dataloader = accelerator.prepare(
ds_model, optimizer, train_dataloader
)
@@ -241,7 +241,7 @@ def train(args):

else:
if train_text_encoder:
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
unet, text_encoder, optimizer, train_dataloader = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader
)
@@ -251,7 +251,7 @@ def train(args):
)
training_models = [unet, text_encoder]
else:
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
unet, optimizer, train_dataloader = accelerator.prepare(unet, optimizer, train_dataloader)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
@@ -399,7 +399,7 @@ def train(args):
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

optimizer.step()
-if not args.optimizer_type.lower().endswith("scheduleFree"):
+if not args.optimizer_type.lower().endswith("schedulefree"):
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)

6 changes: 3 additions & 3 deletions train_network.py
@@ -420,7 +420,7 @@ def train(self, args):
text_encoder2=text_encoders[1] if train_text_encoder and len(text_encoders) > 1 else None,
network=network,
)
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
ds_model, optimizer, train_dataloader = accelerator.prepare(
ds_model, optimizer, train_dataloader
)
@@ -443,7 +443,7 @@ def train(self, args):
else:
pass # if text_encoder is not trained, no need to prepare. and device and dtype are already set

-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
network, optimizer, train_dataloader = accelerator.prepare(
network, optimizer, train_dataloader
)
@@ -924,7 +924,7 @@ def remove_model(old_ckpt_name):
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

optimizer.step()
-if not args.optimizer_type.lower().endswith("scheduleFree"):
+if not args.optimizer_type.lower().endswith("schedulefree"):
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)

4 changes: 2 additions & 2 deletions train_textual_inversion.py
@@ -416,7 +416,7 @@ def train(self, args):

# apparently accelerator handles this for us
if len(text_encoders) == 1:
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
text_encoder_or_list, optimizer, train_dataloader = accelerator.prepare(
text_encoder_or_list, optimizer, train_dataloader
)
@@ -426,7 +426,7 @@ def train(self, args):
)

elif len(text_encoders) == 2:
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
text_encoder1, text_encoder2, optimizer, train_dataloader = accelerator.prepare(
text_encoders[0], text_encoders[1], optimizer, train_dataloader
)
4 changes: 2 additions & 2 deletions train_textual_inversion_XTI.py
@@ -335,7 +335,7 @@ def train(args):
lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)

# apparently accelerator handles this for us
-if args.optimizer_type.lower().endswith("scheduleFree"):
+if args.optimizer_type.lower().endswith("schedulefree"):
text_encoder, optimizer, train_dataloader = accelerator.prepare(
text_encoder, optimizer, train_dataloader
)
@@ -507,7 +507,7 @@ def remove_model(old_ckpt_name):
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

optimizer.step()
-if not args.optimizer_type.lower().endswith("scheduleFree"):
+if not args.optimizer_type.lower().endswith("schedulefree"):
lr_scheduler.step()
optimizer.zero_grad(set_to_none=True)

