Commit

add vae test for data preprocess
rlsu9 committed Jan 3, 2025
1 parent 830f70e commit 751f912
Showing 2 changed files with 46 additions and 0 deletions.
1 change: 1 addition & 0 deletions .github/workflows/test.yml
@@ -20,6 +20,7 @@ jobs:

      - name: Install dependencies
        run: |
          pip install setuptools
          pip install --upgrade pip
          pip install packaging ninja && pip install flash-attn==2.7.0.post2 --no-build-isolation
          pip install -e .
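The CI step installs packaging and ninja before the pinned flash-attn==2.7.0.post2, so the wheel can build with --no-build-isolation, then installs the repository itself in editable mode. A quick, hypothetical way to confirm the resulting environment matches what the new test imports (not part of this commit; module names are import names, so the pip package flash-attn imports as flash_attn):

import importlib.util

# Hypothetical sanity check (not part of this commit): confirm the modules the
# new test imports, plus flash_attn from the CI step, resolve in this environment.
for module in ("torch", "transformers", "diffusers", "flash_attn"):
    found = importlib.util.find_spec(module) is not None
    print(f"{module}: {'ok' if found else 'missing'}")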
45 changes: 45 additions & 0 deletions tests/test_data_preprocess.py
@@ -0,0 +1,45 @@
import os
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import AutoencoderKLHunyuanVideo
import torch

init_dict = {  # tiny HunyuanVideo VAE config so the test runs quickly on CPU
    "in_channels": 3,
    "out_channels": 3,
    "latent_channels": 4,
    "down_block_types": (
        "HunyuanVideoDownBlock3D",
        "HunyuanVideoDownBlock3D",
        "HunyuanVideoDownBlock3D",
        "HunyuanVideoDownBlock3D",
    ),
    "up_block_types": (
        "HunyuanVideoUpBlock3D",
        "HunyuanVideoUpBlock3D",
        "HunyuanVideoUpBlock3D",
        "HunyuanVideoUpBlock3D",
    ),
    "block_out_channels": (8, 8, 8, 8),
    "layers_per_block": 1,
    "act_fn": "silu",
    "norm_num_groups": 4,
    "scaling_factor": 0.476986,
    "spatial_compression_ratio": 8,
    "temporal_compression_ratio": 4,
    "mid_block_add_attention": True,
}
os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1"  # keep CI logs quiet
text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

model = AutoencoderKLHunyuanVideo(**init_dict)

input_tensor = torch.rand(1, 3, 9, 16, 16)  # (batch, channels, frames, height, width)

vae_encoder_output = model.encoder(input_tensor)  # channels = 2 * latent_channels (mean + log-variance)

# vae_decoder_output = model.decoder(vae_encoder_output)

assert vae_encoder_output.shape == (1, 8, 3, 2, 2)

# print(vae_decoder_output.shape)
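The expected shape in the assert follows from the config above: the encoder emits mean and log-variance, so the channel count doubles from latent_channels; with the usual causal-video-VAE convention the first frame is kept and the remaining frames are compressed by temporal_compression_ratio; height and width shrink by spatial_compression_ratio. A minimal sketch of that arithmetic (variable names here are illustrative, not from the commit):

batch, channels, frames, height, width = 1, 3, 9, 16, 16  # input_tensor shape
latent_channels = 4
spatial_ratio = 8   # spatial_compression_ratio
temporal_ratio = 4  # temporal_compression_ratio

expected = (
    batch,
    2 * latent_channels,                 # mean + log-variance -> 8
    (frames - 1) // temporal_ratio + 1,  # first frame kept, rest compressed -> 3
    height // spatial_ratio,             # 16 // 8 -> 2
    width // spatial_ratio,              # 16 // 8 -> 2
)
assert expected == (1, 8, 3, 2, 2)

The commented-out decoder call is presumably left disabled because model.decoder expects latent_channels inputs (4 here) while the raw encoder output carries 2 * latent_channels; if diffusers' standard VAE interface applies to this class, round-tripping through encode() and decode(), which samples from the latent distribution first, would be the natural way to extend the test.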
