[feat]: Add format auto fixer to main branch (#124)
rlsu9 authored Dec 31, 2024
1 parent d467c7c commit 0aed186
Showing 70 changed files with 3,825 additions and 3,374 deletions.
45 changes: 45 additions & 0 deletions .github/workflows/codespell.yml
@@ -0,0 +1,45 @@
name: codespell

on:
# Trigger the workflow on push or pull request,
# but only for the main branch
push:
branches:
- main
paths:
- "**/*.py"
- "**/*.md"
- "**/*.rst"
- pyproject.toml
- requirements-lint.txt
- .github/workflows/codespell.yml
pull_request:
branches:
- main
paths:
- "**/*.py"
- "**/*.md"
- "**/*.rst"
- pyproject.toml
- requirements-lint.txt
- .github/workflows/codespell.yml

jobs:
codespell:
runs-on: ubuntu-latest
steps:
- name: Check out repository
uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.12' # or any version you need
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements-lint.txt
- name: Spelling check with codespell
run: |
        # $CODESPELL_EXCLUDES comes from the workflow environment (empty when unset)
codespell --toml pyproject.toml $CODESPELL_EXCLUDES
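The check is easy to reproduce before pushing. A minimal local sketch, assuming the pins in requirements-lint.txt (and leaving $CODESPELL_EXCLUDES unset, which matches this workflow, since no env: block defines it):

```bash
# Mirror the codespell CI job locally.
python -m pip install --upgrade pip
pip install -r requirements-lint.txt
codespell --toml pyproject.toml
```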
50 changes: 50 additions & 0 deletions .github/workflows/ruff.yml
@@ -0,0 +1,50 @@
name: ruff

on:
# Trigger the workflow on push or pull request,
# but only for the main branch
push:
branches:
- main
paths:
- "**/*.py"
- pyproject.toml
- requirements-lint.txt
- .github/workflows/matchers/ruff.json
- .github/workflows/ruff.yml
pull_request:
branches:
- main
# This workflow is only relevant when one of the following files changes.
# However, we have github configured to expect and require this workflow
# to run and pass before github will auto-merge a pull request. Until github
# allows more flexible auto-merge policy, we can just run this on every PR.
# It doesn't take that long to run, anyway.
#paths:
# - "**/*.py"
# - pyproject.toml
# - requirements-lint.txt
# - .github/workflows/matchers/ruff.json
# - .github/workflows/ruff.yml

jobs:
ruff:
runs-on: ubuntu-latest
steps:
- name: Check out repository
uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.12' # or any version you need
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements-lint.txt
- name: Analysing the code with ruff
run: |
ruff check .
- name: Run isort
run: |
isort . --check-only
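Both steps can be run locally with the same pinned tools from requirements-lint.txt; a sketch:

```bash
# Lint exactly as CI does; each command exits non-zero on violations.
ruff check .
isort . --check-only
# To rewrite import order in place instead of just checking it:
isort .
```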
38 changes: 38 additions & 0 deletions .github/workflows/yapf.yml
@@ -0,0 +1,38 @@
name: yapf

on:
# Trigger the workflow on push or pull request,
# but only for the main branch
push:
branches:
- main
paths:
- "**/*.py"
- .github/workflows/yapf.yml
pull_request:
branches:
- main
paths:
- "**/*.py"
- .github/workflows/yapf.yml

jobs:
yapf:
runs-on: ubuntu-latest
steps:
- name: Check out repository
uses: actions/checkout@v3

- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.12' # or any version you need

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install yapf==0.32.0
pip install toml==0.10.2
- name: Running yapf
run: |
yapf --diff --recursive .
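The toml package is pinned alongside yapf because yapf needs it to read its style configuration from pyproject.toml. A local sketch of the check, plus the in-place variant the CI job deliberately avoids:

```bash
pip install yapf==0.32.0 toml==0.10.2
yapf --diff --recursive .       # CI mode: print diffs, change nothing
yapf --in-place --recursive .   # apply the formatting locally
```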
2 changes: 1 addition & 1 deletion README.md
@@ -116,7 +116,7 @@ Ensure your data is prepared and preprocessed in the format specified in [data_p
```bash
python scripts/huggingface/download_hf.py --repo_id=FastVideo/Mochi-Black-Myth --local_dir=data/Mochi-Black-Myth --repo_type=dataset
```
-Download the original model weights as specificed in [Distill Section](#-distill):
+Download the original model weights as specified in [Distill Section](#-distill):

Then you can run the finetune with:
```
54 changes: 30 additions & 24 deletions demo/gradio_web_demo.py
@@ -1,13 +1,15 @@
+import argparse
+import os
+import tempfile

import gradio as gr
import torch
-from fastvideo.models.mochi_hf.pipeline_mochi import MochiPipeline
-from fastvideo.models.mochi_hf.modeling_mochi import MochiTransformer3DModel
from diffusers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import export_to_video

from fastvideo.distill.solver import PCMFMScheduler
-import tempfile
-import os
-import argparse
+from fastvideo.models.mochi_hf.modeling_mochi import MochiTransformer3DModel
+from fastvideo.models.mochi_hf.pipeline_mochi import MochiPipeline


def init_args():
@@ -21,7 +23,9 @@ def init_args():
parser.add_argument("--model_path", type=str, default="data/mochi")
parser.add_argument("--seed", type=int, default=12345)
parser.add_argument("--transformer_path", type=str, default=None)
parser.add_argument("--scheduler_type", type=str, default="pcm_linear_quadratic")
parser.add_argument("--scheduler_type",
type=str,
default="pcm_linear_quadratic")
parser.add_argument("--lora_checkpoint_dir", type=str, default=None)
parser.add_argument("--shift", type=float, default=8.0)
parser.add_argument("--num_euler_timesteps", type=int, default=50)
@@ -32,7 +36,6 @@


def load_model(args):
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.scheduler_type == "euler":
scheduler = FlowMatchEulerDiscreteScheduler()
else:
@@ -47,15 +50,15 @@
)

if args.transformer_path:
-        transformer = MochiTransformer3DModel.from_pretrained(args.transformer_path)
+        transformer = MochiTransformer3DModel.from_pretrained(
+            args.transformer_path)
else:
transformer = MochiTransformer3DModel.from_pretrained(
-            args.model_path, subfolder="transformer/"
-        )
+            args.model_path, subfolder="transformer/")

-    pipe = MochiPipeline.from_pretrained(
-        args.model_path, transformer=transformer, scheduler=scheduler
-    )
+    pipe = MochiPipeline.from_pretrained(args.model_path,
+                                         transformer=transformer,
+                                         scheduler=scheduler)
pipe.enable_vae_tiling()
# pipe.to(device)
# if args.cpu_offload:
@@ -76,7 +79,7 @@ def generate_video(
randomize_seed=False,
):
if randomize_seed:
-        seed = torch.randint(0, 1000000, (1,)).item()
+        seed = torch.randint(0, 1000000, (1, )).item()

generator = torch.Generator(device="cuda").manual_seed(seed)

@@ -134,9 +137,11 @@ def generate_video(
step=32,
value=args.height,
)
-            width = gr.Slider(
-                label="Width", minimum=256, maximum=1024, step=32, value=args.width
-            )
+            width = gr.Slider(label="Width",
+                              minimum=256,
+                              maximum=1024,
+                              step=32,
+                              value=args.width)

with gr.Row():
num_frames = gr.Slider(
@@ -159,19 +164,20 @@
)

with gr.Row():
-            use_negative_prompt = gr.Checkbox(
-                label="Use negative prompt", value=False
-            )
+            use_negative_prompt = gr.Checkbox(label="Use negative prompt",
+                                              value=False)
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
placeholder="Enter a negative prompt",
visible=False,
)

-        seed = gr.Slider(
-            label="Seed", minimum=0, maximum=1000000, step=1, value=args.seed
-        )
+        seed = gr.Slider(label="Seed",
+                         minimum=0,
+                         maximum=1000000,
+                         step=1,
+                         value=args.seed)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
seed_output = gr.Number(label="Used Seed")

@@ -201,4 +207,4 @@
)

if __name__ == "__main__":
-    demo.queue(max_size=20).launch(server_name="0.0.0.0", server_port=7860)
\ No newline at end of file
+    demo.queue(max_size=20).launch(server_name="0.0.0.0", server_port=7860)
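For reference, a launch along the lines the reformatted init_args() suggests. The flag values below are illustrative except --model_path, whose default (data/mochi) appears in the diff:

```bash
# Hypothetical invocation built from the argparse flags visible above.
python demo/gradio_web_demo.py \
    --model_path data/mochi \
    --scheduler_type pcm_linear_quadratic \
    --num_euler_timesteps 50 \
    --seed 12345
# launch() above binds the Gradio UI to 0.0.0.0:7860.
```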
2 changes: 2 additions & 0 deletions env_setup.sh
@@ -6,5 +6,7 @@ pip install torch==2.5.0 torchvision --index-url https://download.pytorch.org/wh
# install FA2 and diffusers
pip install packaging ninja && pip install flash-attn==2.7.0.post2 --no-build-isolation

+pip install -r requirements-lint.txt

# install fastvideo
pip install -e .
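With the lint requirements now part of environment setup, the checks from all three new workflows can be chained locally; a sketch:

```bash
# One-shot pre-push check mirroring the workflows added in this commit.
codespell --toml pyproject.toml \
  && ruff check . \
  && isort . --check-only \
  && yapf --diff --recursive .
```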
