Skip to content

Commit

Permalink
update format
Browse files Browse the repository at this point in the history
  • Loading branch information
rlsu9 committed Jan 3, 2025
1 parent 751f912 commit a10748c
Showing 1 changed file with 43 additions and 31 deletions.
74 changes: 43 additions & 31 deletions tests/test_data_preprocess.py
Original file line number Diff line number Diff line change
@@ -1,45 +1,57 @@
import os
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import AutoencoderKLHunyuanVideo

import torch
from diffusers import AutoencoderKLHunyuanVideo
from transformers import AutoTokenizer, T5EncoderModel

# Minimal config for a tiny AutoencoderKLHunyuanVideo used only in tests:
# four down/up blocks of 8 channels each keep construction and the forward
# pass fast. (The diff residue defined this dict twice with identical
# contents; a single definition suffices.)
init_dict = {
    "in_channels": 3,
    "out_channels": 3,
    "latent_channels": 4,
    "down_block_types": (
        "HunyuanVideoDownBlock3D",
        "HunyuanVideoDownBlock3D",
        "HunyuanVideoDownBlock3D",
        "HunyuanVideoDownBlock3D",
    ),
    "up_block_types": (
        "HunyuanVideoUpBlock3D",
        "HunyuanVideoUpBlock3D",
        "HunyuanVideoUpBlock3D",
        "HunyuanVideoUpBlock3D",
    ),
    "block_out_channels": (8, 8, 8, 8),
    "layers_per_block": 1,
    "act_fn": "silu",
    "norm_num_groups": 4,
    "scaling_factor": 0.476986,
    "spatial_compression_ratio": 8,
    "temporal_compression_ratio": 4,
    "mid_block_add_attention": True,
}
# Keep Hugging Face hub download progress bars out of test output.
os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1"

# Tiny random checkpoints keep the download and forward pass fast.
# (Diff residue loaded the text encoder and ran the VAE encoder twice;
# once is enough — the duplicates only doubled download/compute time.)
text_encoder = T5EncoderModel.from_pretrained(
    "hf-internal-testing/tiny-random-t5")
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

model = AutoencoderKLHunyuanVideo(**init_dict)

# Random video batch: (batch, channels, frames, height, width).
input_tensor = torch.rand(1, 3, 9, 16, 16)

vae_encoder_output = model.encoder(input_tensor)

# 16x16 spatial -> 2x2 and 9 frames -> 3 match the configured 8x spatial /
# 4x temporal compression; the 8 channels look like 2 * latent_channels
# (presumably mean + logvar — confirm against diffusers' VAE encoder).
assert vae_encoder_output.shape == (1, 8, 3, 2, 2)

0 comments on commit a10748c

Please sign in to comment.