use better context manager to fix potential problems
lllyasviel committed Feb 8, 2024
1 parent 4c9db26 commit 760f727
Showing 2 changed files with 4 additions and 1 deletion.
2 changes: 1 addition & 1 deletion modules/processing.py
@@ -814,7 +814,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:

     infotexts = []
     output_images = []
-    with torch.no_grad():
+    with torch.inference_mode():
         with devices.autocast():
             p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
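This hunk swaps torch.no_grad() for torch.inference_mode(). As a minimal standalone sketch (not part of the diff), the practical difference is that inference_mode() also marks the tensors it creates as inference tensors, skipping version-counter and view bookkeeping, so they cannot later be pulled back into an autograd graph without cloning:

import torch

x = torch.ones(3, requires_grad=True)

# no_grad() only disables gradient tracking for the operations inside the block.
with torch.no_grad():
    y = x * 2
print(y.requires_grad)   # False
print(y.is_inference())  # False

# inference_mode() additionally marks results as inference tensors, letting
# PyTorch skip autograd bookkeeping entirely; such tensors must be cloned
# before they can take part in a later backward pass.
with torch.inference_mode():
    z = x * 2
print(z.requires_grad)   # False
print(z.is_inference())  # True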
3 changes: 3 additions & 0 deletions modules_forge/forge_loader.py
@@ -130,6 +130,7 @@ class WeightsLoader(torch.nn.Module):
     return ForgeSD(model_patcher, clip, vae, clipvision)


+@torch.inference_mode()
 def load_model_for_a1111(timer, checkpoint_info=None, state_dict=None):
     a1111_config_filename = find_checkpoint_config(state_dict, checkpoint_info)
     a1111_config = OmegaConf.load(a1111_config_filename)
@@ -232,11 +233,13 @@ def load_model_for_a1111(timer, checkpoint_info=None, state_dict=None):
     sd_model.sd_model_checkpoint = checkpoint_info.filename
     sd_model.sd_checkpoint_info = checkpoint_info

+    @torch.inference_mode()
     def patched_decode_first_stage(x):
         sample = forge_objects.unet.model.model_config.latent_format.process_out(x)
         sample = forge_objects.vae.decode(sample).movedim(-1, 1) * 2.0 - 1.0
         return sample.to(x)

+    @torch.inference_mode()
     def patched_encode_first_stage(x):
         sample = forge_objects.vae.encode(x.movedim(1, -1) * 0.5 + 0.5)
         sample = forge_objects.unet.model.model_config.latent_format.process_in(sample)
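In forge_loader.py the same context manager is applied as a decorator, so model loading and the patched VAE helpers run entirely in inference mode. A minimal sketch of that pattern with hypothetical names (decode_latents, vae, latent), not the repository's actual API:

import torch

# Hypothetical helper mirroring the decorator pattern above: every tensor
# created while the function runs is an inference tensor, so the decode
# skips autograd bookkeeping.
@torch.inference_mode()
def decode_latents(vae, latent):
    return vae.decode(latent)

# A caller that later needs the result inside an autograd graph has to
# clone it first, e.g. decode_latents(vae, latent).clone().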
