diff --git a/.github/workflows/pr_quality.yml b/.github/workflows/pr_quality.yml index 8d6e20efe364..4aff798a2aa7 100644 --- a/.github/workflows/pr_quality.yml +++ b/.github/workflows/pr_quality.yml @@ -27,9 +27,8 @@ jobs: pip install .[quality] - name: Check quality run: | - black --check --preview examples tests src utils scripts - isort --check-only examples tests src utils scripts - flake8 examples tests src utils scripts + black --check examples tests src utils scripts + ruff examples tests src utils scripts doc-builder style src/diffusers docs/source --max_len 119 --check_only --path_to_docs docs/source check_repository_consistency: diff --git a/.gitignore b/.gitignore index da087d26f49e..ba8164724610 100644 --- a/.gitignore +++ b/.gitignore @@ -169,3 +169,6 @@ tags # dependencies /transformers + +# ruff +.ruff_cache diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6f725ae87946..9780dae7f7df 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -177,7 +177,7 @@ Follow these steps to start contributing ([supported Python versions](https://gi $ make style ``` - 🧨 Diffusers also uses `flake8` and a few custom scripts to check for coding mistakes. Quality + 🧨 Diffusers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality control runs in CI, however you can also run the same checks with: ```bash diff --git a/Makefile b/Makefile index 8583f59d4110..94af6d2f1272 100644 --- a/Makefile +++ b/Makefile @@ -9,9 +9,8 @@ modified_only_fixup: $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs))) @if test -n "$(modified_py_files)"; then \ echo "Checking/fixing $(modified_py_files)"; \ - black --preview $(modified_py_files); \ - isort $(modified_py_files); \ - flake8 $(modified_py_files); \ + black $(modified_py_files); \ + ruff $(modified_py_files); \ else \ echo "No library .py files were modified"; \ fi @@ -41,9 +40,8 @@ repo-consistency: # this target runs checks on all files quality: - black --check --preview $(check_dirs) - isort --check-only $(check_dirs) - flake8 $(check_dirs) + black --check $(check_dirs) + ruff $(check_dirs) doc-builder style src/diffusers docs/source --max_len 119 --check_only --path_to_docs docs/source python utils/check_doc_toc.py @@ -57,8 +55,8 @@ extra_style_checks: # this target runs checks on all files and potentially modifies some of them style: - black --preview $(check_dirs) - isort $(check_dirs) + black $(check_dirs) + ruff $(check_dirs) --fix ${MAKE} autogenerate_code ${MAKE} extra_style_checks diff --git a/docs/source/en/conceptual/contribution.mdx b/docs/source/en/conceptual/contribution.mdx index ef6cc6946855..99ea6e9d7e06 100644 --- a/docs/source/en/conceptual/contribution.mdx +++ b/docs/source/en/conceptual/contribution.mdx @@ -177,7 +177,7 @@ Follow these steps to start contributing ([supported Python versions](https://gi $ make style ``` - 🧨 Diffusers also uses `flake8` and a few custom scripts to check for coding mistakes. Quality + 🧨 Diffusers also uses `ruff` and a few custom scripts to check for coding mistakes. 
Quality control runs in CI, however you can also run the same checks with: ```bash diff --git a/docs/source/en/optimization/fp16.mdx b/docs/source/en/optimization/fp16.mdx index 5b7b32d6208a..ca245568717a 100644 --- a/docs/source/en/optimization/fp16.mdx +++ b/docs/source/en/optimization/fp16.mdx @@ -210,6 +210,7 @@ torch.set_grad_enabled(False) n_experiments = 2 unet_runs_per_experiment = 50 + # load inputs def generate_inputs(): sample = torch.randn(2, 4, 64, 64).half().cuda() @@ -288,6 +289,8 @@ pipe = StableDiffusionPipeline.from_pretrained( # use jitted unet unet_traced = torch.jit.load("unet_traced.pt") + + # del pipe.unet class TracedUNet(torch.nn.Module): def __init__(self): diff --git a/examples/community/bit_diffusion.py b/examples/community/bit_diffusion.py index b27b67c97a36..c778b6cc6c71 100644 --- a/examples/community/bit_diffusion.py +++ b/examples/community/bit_diffusion.py @@ -1,11 +1,11 @@ from typing import Optional, Tuple, Union import torch +from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput -from einops import rearrange, reduce BITS = 8 diff --git a/examples/community/checkpoint_merger.py b/examples/community/checkpoint_merger.py index 32a429b4ab84..576c2cdb4d16 100644 --- a/examples/community/checkpoint_merger.py +++ b/examples/community/checkpoint_merger.py @@ -10,10 +10,11 @@ if is_safetensors_available(): import safetensors.torch +from huggingface_hub import snapshot_download + from diffusers import DiffusionPipeline, __version__ from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME from diffusers.utils import CONFIG_NAME, DIFFUSERS_CACHE, ONNX_WEIGHTS_NAME, WEIGHTS_NAME -from huggingface_hub import snapshot_download class CheckpointMergerPipeline(DiffusionPipeline): diff --git a/examples/community/clip_guided_stable_diffusion.py b/examples/community/clip_guided_stable_diffusion.py index 07c44a9cbf33..68bdf22f9454 100644 --- a/examples/community/clip_guided_stable_diffusion.py +++ b/examples/community/clip_guided_stable_diffusion.py @@ -4,6 +4,8 @@ import torch from torch import nn from torch.nn import functional as F +from torchvision import transforms +from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -14,8 +16,6 @@ UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput -from torchvision import transforms -from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer class MakeCutouts(nn.Module): diff --git a/examples/community/composable_stable_diffusion.py b/examples/community/composable_stable_diffusion.py index 5b5f36f152ea..966227b466ca 100644 --- a/examples/community/composable_stable_diffusion.py +++ b/examples/community/composable_stable_diffusion.py @@ -16,6 +16,8 @@ from typing import Callable, List, Optional, Union import torch +from packaging import version +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict @@ -29,8 +31,6 @@ PNDMScheduler, ) from diffusers.utils import is_accelerate_available -from packaging import version -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from 
...utils import deprecate, logging from . import StableDiffusionPipelineOutput diff --git a/examples/community/imagic_stable_diffusion.py b/examples/community/imagic_stable_diffusion.py index 8e6d6550f6d6..3ca0da0ec061 100644 --- a/examples/community/imagic_stable_diffusion.py +++ b/examples/community/imagic_stable_diffusion.py @@ -7,11 +7,16 @@ from typing import List, Optional, Union import numpy as np +import PIL import torch import torch.nn.functional as F - -import PIL from accelerate import Accelerator + +# TODO: remove and import from diffusers.utils when the new version of diffusers is released +from packaging import version +from tqdm.auto import tqdm +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer + from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput @@ -19,11 +24,6 @@ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, logging -# TODO: remove and import from diffusers.utils when the new version of diffusers is released -from packaging import version -from tqdm.auto import tqdm -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer - if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { diff --git a/examples/community/img2img_inpainting.py b/examples/community/img2img_inpainting.py index cb8071e831b0..71cc22de4b4f 100644 --- a/examples/community/img2img_inpainting.py +++ b/examples/community/img2img_inpainting.py @@ -2,9 +2,10 @@ from typing import Callable, List, Optional, Tuple, Union import numpy as np +import PIL import torch +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer -import PIL from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNet2DConditionModel @@ -12,7 +13,6 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, logging -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/examples/community/interpolate_stable_diffusion.py b/examples/community/interpolate_stable_diffusion.py index f29eef498e41..27d0488760d7 100644 --- a/examples/community/interpolate_stable_diffusion.py +++ b/examples/community/interpolate_stable_diffusion.py @@ -5,6 +5,7 @@ import numpy as np import torch +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict @@ -13,7 +14,6 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, logging -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/examples/community/lpw_stable_diffusion.py b/examples/community/lpw_stable_diffusion.py index 280289e8418e..b4602cc2b905 100644 --- a/examples/community/lpw_stable_diffusion.py +++ b/examples/community/lpw_stable_diffusion.py @@ -3,16 +3,16 @@ from 
typing import Callable, List, Optional, Union import numpy as np +import PIL import torch +from packaging import version +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer import diffusers -import PIL from diffusers import SchedulerMixin, StableDiffusionPipeline from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.utils import deprecate, logging -from packaging import version -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer try: diff --git a/examples/community/lpw_stable_diffusion_onnx.py b/examples/community/lpw_stable_diffusion_onnx.py index 80204ccb19d0..eae130867056 100644 --- a/examples/community/lpw_stable_diffusion_onnx.py +++ b/examples/community/lpw_stable_diffusion_onnx.py @@ -3,15 +3,15 @@ from typing import Callable, List, Optional, Union import numpy as np +import PIL import torch +from packaging import version +from transformers import CLIPFeatureExtractor, CLIPTokenizer import diffusers -import PIL from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import deprecate, logging -from packaging import version -from transformers import CLIPFeatureExtractor, CLIPTokenizer try: diff --git a/examples/community/magic_mix.py b/examples/community/magic_mix.py index d67aec781c36..b1d69ec84576 100644 --- a/examples/community/magic_mix.py +++ b/examples/community/magic_mix.py @@ -1,6 +1,10 @@ from typing import Union import torch +from PIL import Image +from torchvision import transforms as tfms +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -10,10 +14,6 @@ PNDMScheduler, UNet2DConditionModel, ) -from PIL import Image -from torchvision import transforms as tfms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer class MagicMixPipeline(DiffusionPipeline): diff --git a/examples/community/multilingual_stable_diffusion.py b/examples/community/multilingual_stable_diffusion.py index 7c3c464138c8..587ec3be01fb 100644 --- a/examples/community/multilingual_stable_diffusion.py +++ b/examples/community/multilingual_stable_diffusion.py @@ -2,14 +2,6 @@ from typing import Callable, List, Optional, Union import torch - -from diffusers import DiffusionPipeline -from diffusers.configuration_utils import FrozenDict -from diffusers.models import AutoencoderKL, UNet2DConditionModel -from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from diffusers.utils import deprecate, logging from transformers import ( CLIPFeatureExtractor, CLIPTextModel, @@ -19,6 +11,14 @@ pipeline, ) +from diffusers import DiffusionPipeline +from diffusers.configuration_utils import FrozenDict +from diffusers.models import AutoencoderKL, UNet2DConditionModel +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler +from diffusers.utils import deprecate, logging + logger = logging.get_logger(__name__) # pylint: 
disable=invalid-name diff --git a/examples/community/sd_text2img_k_diffusion.py b/examples/community/sd_text2img_k_diffusion.py index 5c740f517995..c68162475cc4 100755 --- a/examples/community/sd_text2img_k_diffusion.py +++ b/examples/community/sd_text2img_k_diffusion.py @@ -17,11 +17,11 @@ from typing import Callable, List, Optional, Union import torch +from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser from diffusers import DiffusionPipeline, LMSDiscreteScheduler from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import is_accelerate_available, logging -from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/examples/community/seed_resize_stable_diffusion.py b/examples/community/seed_resize_stable_diffusion.py index 6c0f9e5174fc..293a4adf92ca 100644 --- a/examples/community/seed_resize_stable_diffusion.py +++ b/examples/community/seed_resize_stable_diffusion.py @@ -5,6 +5,7 @@ from typing import Callable, List, Optional, Union import torch +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNet2DConditionModel @@ -12,7 +13,6 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/examples/community/speech_to_image_diffusion.py b/examples/community/speech_to_image_diffusion.py index bfa7f869acf2..b22418fb39cc 100644 --- a/examples/community/speech_to_image_diffusion.py +++ b/examples/community/speech_to_image_diffusion.py @@ -2,6 +2,13 @@ from typing import Callable, List, Optional, Union import torch +from transformers import ( + CLIPFeatureExtractor, + CLIPTextModel, + CLIPTokenizer, + WhisperForConditionalGeneration, + WhisperProcessor, +) from diffusers import ( AutoencoderKL, @@ -14,13 +21,6 @@ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging -from transformers import ( - CLIPFeatureExtractor, - CLIPTextModel, - CLIPTokenizer, - WhisperForConditionalGeneration, - WhisperProcessor, -) logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/examples/community/stable_diffusion_comparison.py b/examples/community/stable_diffusion_comparison.py index 33cddd27e7d8..d7e6138da12f 100644 --- a/examples/community/stable_diffusion_comparison.py +++ b/examples/community/stable_diffusion_comparison.py @@ -1,6 +1,7 @@ from typing import Any, Callable, Dict, List, Optional, Union import torch +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -13,7 +14,6 @@ ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer pipe1_model_id = "CompVis/stable-diffusion-v1-1" diff --git a/examples/community/stable_diffusion_mega.py 
b/examples/community/stable_diffusion_mega.py index be114ca9b183..44b54dd5208d 100644 --- a/examples/community/stable_diffusion_mega.py +++ b/examples/community/stable_diffusion_mega.py @@ -1,8 +1,9 @@ from typing import Any, Callable, Dict, List, Optional, Union +import PIL.Image import torch +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer -import PIL.Image from diffusers import ( AutoencoderKL, DDIMScheduler, @@ -17,7 +18,6 @@ from diffusers.configuration_utils import FrozenDict from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import deprecate, logging -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/examples/community/stable_unclip.py b/examples/community/stable_unclip.py index 9bf175d5a5d1..8ff9c44d19fd 100644 --- a/examples/community/stable_unclip.py +++ b/examples/community/stable_unclip.py @@ -2,13 +2,13 @@ from typing import List, Optional, Tuple, Union import torch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer +from transformers.models.clip.modeling_clip import CLIPTextModelOutput from diffusers.models import PriorTransformer from diffusers.pipelines import DiffusionPipeline, StableDiffusionImageVariationPipeline from diffusers.schedulers import UnCLIPScheduler from diffusers.utils import logging, randn_tensor -from transformers import CLIPTextModelWithProjection, CLIPTokenizer -from transformers.models.clip.modeling_clip import CLIPTextModelOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/examples/community/text_inpainting.py b/examples/community/text_inpainting.py index 8a12044d4c1f..12598138fffc 100644 --- a/examples/community/text_inpainting.py +++ b/examples/community/text_inpainting.py @@ -1,8 +1,15 @@ from typing import Callable, List, Optional, Union +import PIL import torch +from transformers import ( + CLIPFeatureExtractor, + CLIPSegForImageSegmentation, + CLIPSegProcessor, + CLIPTextModel, + CLIPTokenizer, +) -import PIL from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNet2DConditionModel @@ -10,13 +17,6 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging -from transformers import ( - CLIPFeatureExtractor, - CLIPSegForImageSegmentation, - CLIPSegProcessor, - CLIPTextModel, - CLIPTokenizer, -) logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/examples/community/tiled_upscaling.py b/examples/community/tiled_upscaling.py index e873f2590c48..8dc92f5ae818 100644 --- a/examples/community/tiled_upscaling.py +++ b/examples/community/tiled_upscaling.py @@ -16,14 +16,14 @@ from typing import Callable, List, Optional, Union import numpy as np +import PIL import torch +from PIL import Image +from transformers import CLIPTextModel, CLIPTokenizer -import PIL from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler -from PIL import Image -from transformers import CLIPTextModel, CLIPTokenizer 
def make_transparency_mask(size, overlap_pixels, remove_borders=[]): diff --git a/examples/community/wildcard_stable_diffusion.py b/examples/community/wildcard_stable_diffusion.py index e85c0cc8dbbe..9825c0fec5b3 100644 --- a/examples/community/wildcard_stable_diffusion.py +++ b/examples/community/wildcard_stable_diffusion.py @@ -6,6 +6,7 @@ from typing import Callable, Dict, List, Optional, Union import torch +from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict @@ -14,7 +15,6 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, logging -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer logger = logging.get_logger(__name__) # pylint: disable=invalid-name diff --git a/examples/dreambooth/train_dreambooth.py b/examples/dreambooth/train_dreambooth.py index 6d2a8a97cc03..880f4a51f611 100644 --- a/examples/dreambooth/train_dreambooth.py +++ b/examples/dreambooth/train_dreambooth.py @@ -23,28 +23,28 @@ from pathlib import Path from typing import Optional +import accelerate import torch import torch.nn.functional as F import torch.utils.checkpoint -from torch.utils.data import Dataset - -import accelerate -import diffusers import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed -from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from diffusers.utils import check_min_version -from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, create_repo, whoami from packaging import version from PIL import Image +from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig +import diffusers +from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.utils import check_min_version +from diffusers.utils.import_utils import is_xformers_available + # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
check_min_version("0.13.0.dev0") diff --git a/examples/dreambooth/train_dreambooth_flax.py b/examples/dreambooth/train_dreambooth_flax.py index 1d80781024ad..d20a0d25c19e 100644 --- a/examples/dreambooth/train_dreambooth_flax.py +++ b/examples/dreambooth/train_dreambooth_flax.py @@ -6,15 +6,24 @@ from pathlib import Path from typing import Optional +import jax +import jax.numpy as jnp import numpy as np +import optax import torch import torch.utils.checkpoint +import transformers +from flax import jax_utils +from flax.training import train_state +from flax.training.common_utils import shard +from huggingface_hub import HfFolder, Repository, create_repo, whoami +from jax.experimental.compilation_cache import compilation_cache as cc +from PIL import Image from torch.utils.data import Dataset +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel, set_seed -import jax -import jax.numpy as jnp -import optax -import transformers from diffusers import ( FlaxAutoencoderKL, FlaxDDPMScheduler, @@ -24,15 +33,6 @@ ) from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker from diffusers.utils import check_min_version -from flax import jax_utils -from flax.training import train_state -from flax.training.common_utils import shard -from huggingface_hub import HfFolder, Repository, create_repo, whoami -from jax.experimental.compilation_cache import compilation_cache as cc -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel, set_seed # Will error if the minimal version of diffusers is not installed. Remove at your own risks. diff --git a/examples/dreambooth/train_dreambooth_lora.py b/examples/dreambooth/train_dreambooth_lora.py index 5d49328f0f02..aa38761f4f8f 100644 --- a/examples/dreambooth/train_dreambooth_lora.py +++ b/examples/dreambooth/train_dreambooth_lora.py @@ -26,13 +26,18 @@ import torch import torch.nn.functional as F import torch.utils.checkpoint -from torch.utils.data import Dataset - -import diffusers import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed +from huggingface_hub import HfFolder, Repository, create_repo, whoami +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import AutoTokenizer, PretrainedConfig + +import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, @@ -45,11 +50,6 @@ from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available -from huggingface_hub import HfFolder, Repository, create_repo, whoami -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import AutoTokenizer, PretrainedConfig # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
diff --git a/examples/research_projects/colossalai/train_dreambooth_colossalai.py b/examples/research_projects/colossalai/train_dreambooth_colossalai.py index 17212e84f8d6..6136f7233900 100644 --- a/examples/research_projects/colossalai/train_dreambooth_colossalai.py +++ b/examples/research_projects/colossalai/train_dreambooth_colossalai.py @@ -5,12 +5,10 @@ from pathlib import Path from typing import Optional +import colossalai import torch import torch.nn.functional as F import torch.utils.checkpoint -from torch.utils.data import Dataset - -import colossalai from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.logging import disable_existing_loggers, get_dist_logger @@ -18,14 +16,16 @@ from colossalai.nn.parallel.utils import get_static_torch_model from colossalai.utils import get_current_device from colossalai.utils.model.colo_init_context import ColoInitContext -from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler from huggingface_hub import HfFolder, Repository, create_repo, whoami from PIL import Image +from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig +from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler + disable_existing_loggers() logger = get_dist_logger() diff --git a/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py b/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py index fb217690641f..789440e750f1 100644 --- a/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py +++ b/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint.py @@ -11,11 +11,16 @@ import torch import torch.nn.functional as F import torch.utils.checkpoint -from torch.utils.data import Dataset - from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed +from huggingface_hub import HfFolder, Repository, create_repo, whoami +from PIL import Image, ImageDraw +from torch.utils.data import Dataset +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + from diffusers import ( AutoencoderKL, DDPMScheduler, @@ -25,11 +30,6 @@ ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version -from huggingface_hub import HfFolder, Repository, create_repo, whoami -from PIL import Image, ImageDraw -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
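The other half of the story is which rules run at all. In the `pyproject.toml` hunk further down, `select = ["E", "F", "I", "W"]` enables pycodestyle errors and warnings, pyflakes, and the isort-compatible `I` rules, while `ignore = ["E501", "E741", "W605"]` drops line-length checks (black owns those at 119), ambiguous variable names, and invalid escape sequences. The `per-file-ignores` table then exempts `__init__.py` files from E402/F401/F403/F811, since the package deliberately re-exports names there. A hedged sketch of what that exemption covers; outside an `__init__.py`, ruff would flag both imports as F401 (imported but unused):

```python
# Typical __init__.py re-export pattern: nothing in this file "uses" the imports;
# they exist so callers can write `from mypackage import OrderedDict, path_join`.
# The per-file-ignores entry keeps ruff from reporting F401 on files like this.
from collections import OrderedDict
from os.path import join as path_join
```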
diff --git a/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py b/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py index eade3e7ae6dc..5d6f249d8469 100644 --- a/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py +++ b/examples/research_projects/dreambooth_inpaint/train_dreambooth_inpaint_lora.py @@ -10,22 +10,22 @@ import torch import torch.nn.functional as F import torch.utils.checkpoint -from torch.utils.data import Dataset - from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed +from huggingface_hub import HfFolder, Repository, create_repo, whoami +from PIL import Image, ImageDraw +from torch.utils.data import Dataset +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.loaders import AttnProcsLayers from diffusers.models.cross_attention import LoRACrossAttnProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available -from huggingface_hub import HfFolder, Repository, create_repo, whoami -from PIL import Image, ImageDraw -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer # Will error if the minimal version of diffusers is not installed. Remove at your own risks. diff --git a/examples/research_projects/intel_opts/inference_bf16.py b/examples/research_projects/intel_opts/inference_bf16.py index 2a4354941f28..8431693a45c8 100644 --- a/examples/research_projects/intel_opts/inference_bf16.py +++ b/examples/research_projects/intel_opts/inference_bf16.py @@ -1,8 +1,8 @@ +import intel_extension_for_pytorch as ipex import torch +from PIL import Image -import intel_extension_for_pytorch as ipex from diffusers import StableDiffusionPipeline -from PIL import Image def image_grid(imgs, rows, cols): diff --git a/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py b/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py index f20db249ecec..f446efc0b0c0 100644 --- a/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py +++ b/examples/research_projects/intel_opts/textual_inversion/textual_inversion_bf16.py @@ -6,30 +6,30 @@ from pathlib import Path from typing import Optional +import intel_extension_for_pytorch as ipex import numpy as np +import PIL import torch import torch.nn.functional as F import torch.utils.checkpoint -from torch.utils.data import Dataset - -import intel_extension_for_pytorch as ipex -import PIL from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed -from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker -from diffusers.utils import check_min_version from huggingface_hub import HfFolder, Repository, create_repo, whoami # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image +from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import 
tqdm from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer +from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker +from diffusers.utils import check_min_version + if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { diff --git a/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py b/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py index 6ec5bef550fd..3865deb2e3a9 100644 --- a/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py +++ b/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py @@ -8,27 +8,27 @@ from pathlib import Path from typing import Optional +import datasets import torch import torch.nn.functional as F import torch.utils.checkpoint -from torch.utils.data import Dataset - -import datasets -import diffusers import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed -from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from diffusers.utils import check_min_version -from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, create_repo, whoami from PIL import Image +from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig +import diffusers +from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.utils import check_min_version +from diffusers.utils.import_utils import is_xformers_available + # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
check_min_version("0.13.0.dev0") diff --git a/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py b/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py index d32ef08f457d..4bca25167b0e 100644 --- a/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py +++ b/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py @@ -21,29 +21,29 @@ from pathlib import Path from typing import Optional +import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint - -import datasets -import diffusers import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from diffusers.training_utils import EMAModel -from diffusers.utils import check_min_version -from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, create_repo, whoami from onnxruntime.training.ortmodule import ORTModule from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer +import diffusers +from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel +from diffusers.utils import check_min_version +from diffusers.utils.import_utils import is_xformers_available + # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") diff --git a/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py b/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py index 0796f0a70890..f54e2d3e3f53 100644 --- a/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py +++ b/examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py @@ -21,19 +21,28 @@ from pathlib import Path from typing import Optional +import datasets import numpy as np +import PIL import torch import torch.nn.functional as F import torch.utils.checkpoint -from torch.utils.data import Dataset - -import datasets -import diffusers -import PIL import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed +from huggingface_hub import HfFolder, Repository, create_repo, whoami +from onnxruntime.training.ortmodule import ORTModule + +# TODO: remove and import from diffusers.utils when the new version of diffusers is released +from packaging import version +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + +import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, @@ -45,15 +54,6 @@ from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available -from huggingface_hub import HfFolder, Repository, create_repo, whoami -from onnxruntime.training.ortmodule import ORTModule - -# TODO: remove and import from diffusers.utils when the new version of diffusers is released -from packaging import version -from PIL 
import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): diff --git a/examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py b/examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py index 52cd99c04624..b9dec9a745e8 100644 --- a/examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py +++ b/examples/research_projects/onnxruntime/unconditional_image_generation/train_unconditional.py @@ -6,23 +6,23 @@ from pathlib import Path from typing import Optional +import datasets import torch import torch.nn.functional as F - -import datasets -import diffusers from accelerate import Accelerator from accelerate.logging import get_logger from datasets import load_dataset -from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel -from diffusers.optimization import get_scheduler -from diffusers.training_utils import EMAModel -from diffusers.utils import check_min_version from huggingface_hub import HfFolder, Repository, create_repo, whoami from onnxruntime.training.ortmodule import ORTModule from torchvision import transforms from tqdm.auto import tqdm +import diffusers +from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel +from diffusers.utils import check_min_version + # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") diff --git a/examples/test_examples.py b/examples/test_examples.py index 06fde2c8a995..d940c6d93b6f 100644 --- a/examples/test_examples.py +++ b/examples/test_examples.py @@ -24,6 +24,7 @@ from typing import List from accelerate.utils import write_basic_config + from diffusers.utils import slow diff --git a/examples/text_to_image/train_text_to_image.py b/examples/text_to_image/train_text_to_image.py index e15781aacec7..39089a85680f 100644 --- a/examples/text_to_image/train_text_to_image.py +++ b/examples/text_to_image/train_text_to_image.py @@ -21,30 +21,30 @@ from pathlib import Path from typing import Optional +import accelerate +import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint - -import accelerate -import datasets -import diffusers import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from diffusers.training_utils import EMAModel -from diffusers.utils import check_min_version, deprecate -from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, create_repo, whoami from packaging import version from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer +import diffusers +from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel +from diffusers.utils import check_min_version, deprecate +from diffusers.utils.import_utils import is_xformers_available + # Will error if the 
minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") diff --git a/examples/text_to_image/train_text_to_image_flax.py b/examples/text_to_image/train_text_to_image_flax.py index 0850d39de2de..d88594435e65 100644 --- a/examples/text_to_image/train_text_to_image_flax.py +++ b/examples/text_to_image/train_text_to_image_flax.py @@ -6,15 +6,22 @@ from pathlib import Path from typing import Optional -import numpy as np -import torch -import torch.utils.checkpoint - import jax import jax.numpy as jnp +import numpy as np import optax +import torch +import torch.utils.checkpoint import transformers from datasets import load_dataset +from flax import jax_utils +from flax.training import train_state +from flax.training.common_utils import shard +from huggingface_hub import HfFolder, Repository, create_repo, whoami +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel, set_seed + from diffusers import ( FlaxAutoencoderKL, FlaxDDPMScheduler, @@ -24,13 +31,6 @@ ) from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker from diffusers.utils import check_min_version -from flax import jax_utils -from flax.training import train_state -from flax.training.common_utils import shard -from huggingface_hub import HfFolder, Repository, create_repo, whoami -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel, set_seed # Will error if the minimal version of diffusers is not installed. Remove at your own risks. diff --git a/examples/text_to_image/train_text_to_image_lora.py b/examples/text_to_image/train_text_to_image_lora.py index 4f73af8e79cc..b56e0dca536c 100644 --- a/examples/text_to_image/train_text_to_image_lora.py +++ b/examples/text_to_image/train_text_to_image_lora.py @@ -22,28 +22,28 @@ from pathlib import Path from typing import Optional +import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint - -import datasets -import diffusers import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset +from huggingface_hub import HfFolder, Repository, create_repo, whoami +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + +import diffusers from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel from diffusers.loaders import AttnProcsLayers from diffusers.models.cross_attention import LoRACrossAttnProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available -from huggingface_hub import HfFolder, Repository, create_repo, whoami -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer # Will error if the minimal version of diffusers is not installed. Remove at your own risks. 
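Where a violation is intentional, the PR suppresses it inline instead of rewriting the code: several conversion scripts below, plus `value_guided_sampling.py` and `modeling_utils.py`, keep deliberate bare `except:` clauses and gain a `# noqa: E722` marker so ruff skips its bare-except rule on just that line. A minimal runnable sketch of the pattern, not copied from any one file:

```python
def best_effort_mean(values):
    """Return the mean of `values`, or None if anything at all goes wrong."""
    try:
        return sum(values) / len(values)
    except:  # noqa: E722, silence ruff's bare-except rule on this line only
        return None


print(best_effort_mean([1, 2, 3]))  # 2.0
print(best_effort_mean([]))         # None (the ZeroDivisionError is swallowed)
```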
diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py index e8a4594431ee..254d9e6a0f9c 100644 --- a/examples/textual_inversion/textual_inversion.py +++ b/examples/textual_inversion/textual_inversion.py @@ -22,17 +22,25 @@ from typing import Optional import numpy as np +import PIL import torch import torch.nn.functional as F import torch.utils.checkpoint -from torch.utils.data import Dataset - -import diffusers -import PIL import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed +from huggingface_hub import HfFolder, Repository, create_repo, whoami + +# TODO: remove and import from diffusers.utils when the new version of diffusers is released +from packaging import version +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + +import diffusers from diffusers import ( AutoencoderKL, DDPMScheduler, @@ -44,14 +52,6 @@ from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available -from huggingface_hub import HfFolder, Repository, create_repo, whoami - -# TODO: remove and import from diffusers.utils when the new version of diffusers is released -from packaging import version -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): diff --git a/examples/textual_inversion/textual_inversion_flax.py b/examples/textual_inversion/textual_inversion_flax.py index b37d1e2ac499..6ed2c8d24342 100644 --- a/examples/textual_inversion/textual_inversion_flax.py +++ b/examples/textual_inversion/textual_inversion_flax.py @@ -6,25 +6,14 @@ from pathlib import Path from typing import Optional -import numpy as np -import torch -import torch.utils.checkpoint -from torch.utils.data import Dataset - import jax import jax.numpy as jnp +import numpy as np import optax import PIL +import torch +import torch.utils.checkpoint import transformers -from diffusers import ( - FlaxAutoencoderKL, - FlaxDDPMScheduler, - FlaxPNDMScheduler, - FlaxStableDiffusionPipeline, - FlaxUNet2DConditionModel, -) -from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker -from diffusers.utils import check_min_version from flax import jax_utils from flax.training import train_state from flax.training.common_utils import shard @@ -33,10 +22,21 @@ # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image +from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel, set_seed +from diffusers import ( + FlaxAutoencoderKL, + FlaxDDPMScheduler, + FlaxPNDMScheduler, + FlaxStableDiffusionPipeline, + FlaxUNet2DConditionModel, +) +from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker +from diffusers.utils import check_min_version + if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { diff --git a/examples/unconditional_image_generation/train_unconditional.py 
b/examples/unconditional_image_generation/train_unconditional.py index bdba3e7805be..86d84134f336 100644 --- a/examples/unconditional_image_generation/train_unconditional.py +++ b/examples/unconditional_image_generation/train_unconditional.py @@ -6,24 +6,24 @@ from pathlib import Path from typing import Optional -import torch -import torch.nn.functional as F - import accelerate import datasets -import diffusers +import torch +import torch.nn.functional as F from accelerate import Accelerator from accelerate.logging import get_logger from datasets import load_dataset -from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel -from diffusers.optimization import get_scheduler -from diffusers.training_utils import EMAModel -from diffusers.utils import check_min_version from huggingface_hub import HfFolder, Repository, create_repo, whoami from packaging import version from torchvision import transforms from tqdm.auto import tqdm +import diffusers +from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel +from diffusers.utils import check_min_version + # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.13.0.dev0") diff --git a/pyproject.toml b/pyproject.toml index b7465bb13107..5ec7ae51be15 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,18 @@ [tool.black] line-length = 119 -target-version = ['py36'] +target-version = ['py37'] + +[tool.ruff] +# Never enforce `E501` (line length violations). +ignore = ["E501", "E741", "W605"] +select = ["E", "F", "I", "W"] +line-length = 119 + +# Ignore import violations in all `__init__.py` files. +[tool.ruff.per-file-ignores] +"__init__.py" = ["E402", "F401", "F403", "F811"] +"src/diffusers/utils/dummy_*.py" = ["F401"] + +[tool.ruff.isort] +lines-after-imports = 2 +known-first-party = ["diffusers"] diff --git a/scripts/change_naming_configs_and_checkpoints.py b/scripts/change_naming_configs_and_checkpoints.py index 756bdcccab54..685f7681a326 100644 --- a/scripts/change_naming_configs_and_checkpoints.py +++ b/scripts/change_naming_configs_and_checkpoints.py @@ -19,9 +19,9 @@ import os import torch +from transformers.file_utils import has_file from diffusers import UNet2DConditionModel, UNet2DModel -from transformers.file_utils import has_file do_only_config = False diff --git a/scripts/conversion_ldm_uncond.py b/scripts/conversion_ldm_uncond.py index 67edd638303c..d2ebb3934b66 100644 --- a/scripts/conversion_ldm_uncond.py +++ b/scripts/conversion_ldm_uncond.py @@ -1,8 +1,8 @@ import argparse +import OmegaConf import torch -import OmegaConf from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel diff --git a/scripts/convert_dance_diffusion_to_diffusers.py b/scripts/convert_dance_diffusion_to_diffusers.py index db183bce9262..d53d1f792e89 100755 --- a/scripts/convert_dance_diffusion_to_diffusers.py +++ b/scripts/convert_dance_diffusion_to_diffusers.py @@ -5,11 +5,11 @@ from copy import deepcopy import torch +from audio_diffusion.models import DiffusionAttnUnet1D +from diffusion import sampling from torch import nn -from audio_diffusion.models import DiffusionAttnUnet1D from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel -from diffusion import sampling MODELS_MAP = { diff --git a/scripts/convert_diffusers_to_original_stable_diffusion.py b/scripts/convert_diffusers_to_original_stable_diffusion.py index 5ca98469140b..9da45211551e 100644 --- 
a/scripts/convert_diffusers_to_original_stable_diffusion.py +++ b/scripts/convert_diffusers_to_original_stable_diffusion.py @@ -7,7 +7,6 @@ import re import torch - from safetensors.torch import load_file, save_file diff --git a/scripts/convert_dit_to_diffusers.py b/scripts/convert_dit_to_diffusers.py index e14b4ad2a7bc..dc127f69555c 100644 --- a/scripts/convert_dit_to_diffusers.py +++ b/scripts/convert_dit_to_diffusers.py @@ -2,9 +2,9 @@ import os import torch +from torchvision.datasets.utils import download_url from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, Transformer2DModel -from torchvision.datasets.utils import download_url pretrained_models = {512: "DiT-XL-2-512x512.pt", 256: "DiT-XL-2-256x256.pt"} diff --git a/scripts/convert_k_upscaler_to_diffusers.py b/scripts/convert_k_upscaler_to_diffusers.py index 457d921990fa..62abedd73785 100644 --- a/scripts/convert_k_upscaler_to_diffusers.py +++ b/scripts/convert_k_upscaler_to_diffusers.py @@ -1,9 +1,9 @@ import argparse -import torch - import huggingface_hub import k_diffusion as K +import torch + from diffusers import UNet2DConditionModel diff --git a/scripts/convert_kakao_brain_unclip_to_diffusers.py b/scripts/convert_kakao_brain_unclip_to_diffusers.py index 59bcd2c684eb..85d983dea686 100644 --- a/scripts/convert_kakao_brain_unclip_to_diffusers.py +++ b/scripts/convert_kakao_brain_unclip_to_diffusers.py @@ -2,13 +2,13 @@ import tempfile import torch - from accelerate import load_checkpoint_and_dispatch +from transformers import CLIPTextModelWithProjection, CLIPTokenizer + from diffusers import UnCLIPPipeline, UNet2DConditionModel, UNet2DModel from diffusers.models.prior_transformer import PriorTransformer from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.schedulers.scheduling_unclip import UnCLIPScheduler -from transformers import CLIPTextModelWithProjection, CLIPTokenizer """ @@ -249,7 +249,6 @@ def prior_ff_to_diffusers(checkpoint, *, diffusers_ff_prefix, original_ff_prefix "class_embed_type": "identity", "attention_head_dim": 64, "resnet_time_scale_shift": "scale_shift", - "class_embed_type": "identity", } diff --git a/scripts/convert_ldm_original_checkpoint_to_diffusers.py b/scripts/convert_ldm_original_checkpoint_to_diffusers.py index f547e96f4ed5..b82df4c08dc3 100644 --- a/scripts/convert_ldm_original_checkpoint_to_diffusers.py +++ b/scripts/convert_ldm_original_checkpoint_to_diffusers.py @@ -355,5 +355,5 @@ def convert_ldm_checkpoint(checkpoint, config): pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) - except: + except: # noqa: E722 model.save_pretrained(args.dump_path) diff --git a/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py b/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py index 271359b80479..144701ec19af 100644 --- a/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py +++ b/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py @@ -181,5 +181,5 @@ def set_resnet_weights(new_layer, old_checkpoint, index): pipe = ScoreSdeVePipeline(unet=model, scheduler=scheduler) pipe.save_pretrained(args.dump_path) - except: + except: # noqa: E722 model.save_pretrained(args.dump_path) diff --git a/scripts/convert_stable_diffusion_checkpoint_to_onnx.py b/scripts/convert_stable_diffusion_checkpoint_to_onnx.py index 7a2a682d3416..e2d28050709e 100644 --- a/scripts/convert_stable_diffusion_checkpoint_to_onnx.py +++ b/scripts/convert_stable_diffusion_checkpoint_to_onnx.py @@ -17,12 +17,12 @@ import shutil from 
pathlib import Path +import onnx import torch +from packaging import version from torch.onnx import export -import onnx from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline -from packaging import version is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") diff --git a/scripts/convert_unclip_txt2img_to_image_variation.py b/scripts/convert_unclip_txt2img_to_image_variation.py index d228a537ed4c..07f8ebf2a3d0 100644 --- a/scripts/convert_unclip_txt2img_to_image_variation.py +++ b/scripts/convert_unclip_txt2img_to_image_variation.py @@ -1,8 +1,9 @@ import argparse -from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection +from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline + if __name__ == "__main__": parser = argparse.ArgumentParser() diff --git a/scripts/convert_vae_pt_to_diffusers.py b/scripts/convert_vae_pt_to_diffusers.py index ca79c8a0c6ac..4762ffcf8d00 100644 --- a/scripts/convert_vae_pt_to_diffusers.py +++ b/scripts/convert_vae_pt_to_diffusers.py @@ -1,9 +1,10 @@ import argparse import io +import requests import torch +from omegaconf import OmegaConf -import requests from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, @@ -12,7 +13,6 @@ renew_vae_attention_paths, renew_vae_resnet_paths, ) -from omegaconf import OmegaConf def custom_convert_ldm_vae_checkpoint(checkpoint, config): diff --git a/scripts/convert_versatile_diffusion_to_diffusers.py b/scripts/convert_versatile_diffusion_to_diffusers.py index 8a1cdbdc2fa3..cb1ad4cc3f60 100644 --- a/scripts/convert_versatile_diffusion_to_diffusers.py +++ b/scripts/convert_versatile_diffusion_to_diffusers.py @@ -18,6 +18,12 @@ from argparse import Namespace import torch +from transformers import ( + CLIPFeatureExtractor, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionModelWithProjection, +) from diffusers import ( AutoencoderKL, @@ -31,12 +37,6 @@ VersatileDiffusionPipeline, ) from diffusers.pipelines.versatile_diffusion.modeling_text_unet import UNetFlatConditionModel -from transformers import ( - CLIPFeatureExtractor, - CLIPTextModelWithProjection, - CLIPTokenizer, - CLIPVisionModelWithProjection, -) SCHEDULER_CONFIG = Namespace( diff --git a/scripts/convert_vq_diffusion_to_diffusers.py b/scripts/convert_vq_diffusion_to_diffusers.py index 85db67844acd..58ed2d93d5df 100644 --- a/scripts/convert_vq_diffusion_to_diffusers.py +++ b/scripts/convert_vq_diffusion_to_diffusers.py @@ -36,14 +36,14 @@ import tempfile import torch - import yaml from accelerate import init_empty_weights, load_checkpoint_and_dispatch -from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel -from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from transformers import CLIPTextModel, CLIPTokenizer from yaml.loader import FullLoader +from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel +from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings + try: from omegaconf import OmegaConf diff --git a/scripts/generate_logits.py b/scripts/generate_logits.py index 531e2e4d71c5..89dce0e78d4e 100644 --- a/scripts/generate_logits.py +++ b/scripts/generate_logits.py @@ -1,9 +1,9 @@ import random import torch +from huggingface_hub 
import HfApi from diffusers import UNet2DModel -from huggingface_hub import HfApi api = HfApi() diff --git a/setup.py b/setup.py index 80acc539144b..5e38ab6ef901 100644 --- a/setup.py +++ b/setup.py @@ -80,10 +80,9 @@ _deps = [ "Pillow", # keep the PIL.Image.Resampling deprecation away "accelerate>=0.11.0", - "black==22.12", + "black~=23.1", "datasets", "filelock", - "flake8>=3.8.3", "flax>=0.4.1", "hf-doc-builder>=0.3.0", "huggingface-hub>=0.10.0", @@ -99,6 +98,7 @@ "pytest", "pytest-timeout", "pytest-xdist", + "ruff>=0.0.241", "safetensors", "sentencepiece>=0.1.91,!=0.1.92", "scipy", @@ -178,7 +178,7 @@ def run(self): extras = {} -extras["quality"] = deps_list("black", "isort", "flake8", "hf-doc-builder") +extras["quality"] = deps_list("black", "isort", "ruff", "hf-doc-builder") extras["docs"] = deps_list("hf-doc-builder") extras["training"] = deps_list("accelerate", "datasets", "tensorboard", "Jinja2") extras["test"] = deps_list( diff --git a/src/diffusers/configuration_utils.py b/src/diffusers/configuration_utils.py index 52d90a07bd06..4191aa0b56a6 100644 --- a/src/diffusers/configuration_utils.py +++ b/src/diffusers/configuration_utils.py @@ -26,7 +26,6 @@ from typing import Any, Dict, Tuple, Union import numpy as np - from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError from requests import HTTPError diff --git a/src/diffusers/dependency_versions_table.py b/src/diffusers/dependency_versions_table.py index 2fb6959e3cdc..a84d33706ef8 100644 --- a/src/diffusers/dependency_versions_table.py +++ b/src/diffusers/dependency_versions_table.py @@ -4,10 +4,9 @@ deps = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", - "black": "black==22.12", + "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", - "flake8": "flake8>=3.8.3", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.10.0", @@ -23,6 +22,7 @@ "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", + "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", diff --git a/src/diffusers/experimental/rl/value_guided_sampling.py b/src/diffusers/experimental/rl/value_guided_sampling.py index 9a616a209f11..d10062da6025 100644 --- a/src/diffusers/experimental/rl/value_guided_sampling.py +++ b/src/diffusers/experimental/rl/value_guided_sampling.py @@ -14,7 +14,6 @@ import numpy as np import torch - import tqdm from ...models.unet_1d import UNet1DModel @@ -57,13 +56,13 @@ def __init__( for key in self.data.keys(): try: self.means[key] = self.data[key].mean() - except: + except: # noqa: E722 pass self.stds = dict() for key in self.data.keys(): try: self.stds[key] = self.data[key].std() - except: + except: # noqa: E722 pass self.state_dim = env.observation_space.shape[0] self.action_dim = env.action_space.shape[0] diff --git a/src/diffusers/models/modeling_pytorch_flax_utils.py b/src/diffusers/models/modeling_pytorch_flax_utils.py index 46bb7740592a..158ef7cb8723 100644 --- a/src/diffusers/models/modeling_pytorch_flax_utils.py +++ b/src/diffusers/models/modeling_pytorch_flax_utils.py @@ -16,10 +16,9 @@ from pickle import UnpicklingError -import numpy as np - import jax import jax.numpy as jnp +import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict diff --git a/src/diffusers/models/modeling_utils.py 
b/src/diffusers/models/modeling_utils.py index 30048b659034..f71d1d769699 100644 --- a/src/diffusers/models/modeling_utils.py +++ b/src/diffusers/models/modeling_utils.py @@ -20,11 +20,10 @@ from typing import Callable, List, Optional, Tuple, Union import torch -from torch import Tensor, device - from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError from requests import HTTPError +from torch import Tensor, device from .. import __version__ from ..utils import ( @@ -500,7 +499,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P subfolder=subfolder, user_agent=user_agent, ) - except: + except: # noqa: E722 pass if model_file is None: model_file = _get_model_file( diff --git a/src/diffusers/pipelines/alt_diffusion/__init__.py b/src/diffusers/pipelines/alt_diffusion/__init__.py index 09d0d9b7852c..dab2d8db1045 100644 --- a/src/diffusers/pipelines/alt_diffusion/__init__.py +++ b/src/diffusers/pipelines/alt_diffusion/__init__.py @@ -2,7 +2,6 @@ from typing import List, Optional, Union import numpy as np - import PIL from PIL import Image diff --git a/src/diffusers/pipelines/alt_diffusion/modeling_roberta_series.py b/src/diffusers/pipelines/alt_diffusion/modeling_roberta_series.py index 2e92314162d3..637d6dd18698 100644 --- a/src/diffusers/pipelines/alt_diffusion/modeling_roberta_series.py +++ b/src/diffusers/pipelines/alt_diffusion/modeling_roberta_series.py @@ -3,7 +3,6 @@ import torch from torch import nn - from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput diff --git a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py index fb3ad40d5ac1..5a2b040a3a7d 100644 --- a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py +++ b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion.py @@ -16,11 +16,11 @@ from typing import Any, Callable, Dict, List, Optional, Union import torch - -from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPFeatureExtractor, XLMRobertaTokenizer +from diffusers.utils import is_accelerate_available + from ...configuration_utils import FrozenDict from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers diff --git a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py index 003189c816de..ac485f0c9ca0 100644 --- a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py +++ b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py @@ -16,13 +16,13 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL -from diffusers.utils import is_accelerate_available +import torch from packaging import version from transformers import CLIPFeatureExtractor, XLMRobertaTokenizer +from diffusers.utils import is_accelerate_available + from ...configuration_utils import FrozenDict from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import KarrasDiffusionSchedulers diff --git a/src/diffusers/pipelines/audio_diffusion/__init__.py b/src/diffusers/pipelines/audio_diffusion/__init__.py index 37f83bc91487..58554c45ea52 100644 --- a/src/diffusers/pipelines/audio_diffusion/__init__.py +++ 
b/src/diffusers/pipelines/audio_diffusion/__init__.py @@ -1,3 +1,2 @@ -# flake8: noqa from .mel import Mel from .pipeline_audio_diffusion import AudioDiffusionPipeline diff --git a/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py b/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py index d814c16b0857..f8a7c29f62bf 100644 --- a/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py +++ b/src/diffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py @@ -18,7 +18,6 @@ import numpy as np import torch - from PIL import Image from ...models import AutoencoderKL, UNet2DConditionModel diff --git a/src/diffusers/pipelines/dance_diffusion/__init__.py b/src/diffusers/pipelines/dance_diffusion/__init__.py index 2ad34fc52aaa..55d7f8ff9807 100644 --- a/src/diffusers/pipelines/dance_diffusion/__init__.py +++ b/src/diffusers/pipelines/dance_diffusion/__init__.py @@ -1,2 +1 @@ -# flake8: noqa from .pipeline_dance_diffusion import DanceDiffusionPipeline diff --git a/src/diffusers/pipelines/ddim/__init__.py b/src/diffusers/pipelines/ddim/__init__.py index 8fd31868a88a..85e8118e75e7 100644 --- a/src/diffusers/pipelines/ddim/__init__.py +++ b/src/diffusers/pipelines/ddim/__init__.py @@ -1,2 +1 @@ -# flake8: noqa from .pipeline_ddim import DDIMPipeline diff --git a/src/diffusers/pipelines/ddpm/__init__.py b/src/diffusers/pipelines/ddpm/__init__.py index 8889bdae1224..bb228ee012e8 100644 --- a/src/diffusers/pipelines/ddpm/__init__.py +++ b/src/diffusers/pipelines/ddpm/__init__.py @@ -1,2 +1 @@ -# flake8: noqa from .pipeline_ddpm import DDPMPipeline diff --git a/src/diffusers/pipelines/latent_diffusion/__init__.py b/src/diffusers/pipelines/latent_diffusion/__init__.py index 5544527ff587..0cce9a89bcbe 100644 --- a/src/diffusers/pipelines/latent_diffusion/__init__.py +++ b/src/diffusers/pipelines/latent_diffusion/__init__.py @@ -1,4 +1,3 @@ -# flake8: noqa from ...utils import is_transformers_available from .pipeline_latent_diffusion_superresolution import LDMSuperResolutionPipeline diff --git a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py index 35c993ff768f..d27244e3bacc 100644 --- a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py +++ b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py @@ -18,7 +18,6 @@ import torch import torch.nn as nn import torch.utils.checkpoint - from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutput diff --git a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py index 1e9bea273c85..4a6e56d47bd8 100644 --- a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py +++ b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py @@ -2,11 +2,10 @@ from typing import List, Optional, Tuple, Union import numpy as np +import PIL import torch import torch.utils.checkpoint -import PIL - from ...models import UNet2DModel, VQModel from ...schedulers import ( DDIMScheduler, diff --git a/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py b/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py index 0826ca7536c7..1b9fc5270a62 100644 --- a/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py 
+++ b/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py @@ -1,2 +1 @@ -# flake8: noqa from .pipeline_latent_diffusion_uncond import LDMPipeline diff --git a/src/diffusers/pipelines/onnx_utils.py b/src/diffusers/pipelines/onnx_utils.py index 9308a1878845..f414524f0a29 100644 --- a/src/diffusers/pipelines/onnx_utils.py +++ b/src/diffusers/pipelines/onnx_utils.py @@ -21,7 +21,6 @@ from typing import Optional, Union import numpy as np - from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging diff --git a/src/diffusers/pipelines/paint_by_example/__init__.py b/src/diffusers/pipelines/paint_by_example/__init__.py index e234139beba1..f0fc8cb71e3f 100644 --- a/src/diffusers/pipelines/paint_by_example/__init__.py +++ b/src/diffusers/pipelines/paint_by_example/__init__.py @@ -2,7 +2,6 @@ from typing import List, Optional, Union import numpy as np - import PIL from PIL import Image diff --git a/src/diffusers/pipelines/paint_by_example/image_encoder.py b/src/diffusers/pipelines/paint_by_example/image_encoder.py index e83f638c60ac..df577e1678b5 100644 --- a/src/diffusers/pipelines/paint_by_example/image_encoder.py +++ b/src/diffusers/pipelines/paint_by_example/image_encoder.py @@ -13,7 +13,6 @@ # limitations under the License. import torch from torch import nn - from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock diff --git a/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py b/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py index 7cc9c14d78c7..bc6d90d4d3a6 100644 --- a/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +++ b/src/diffusers/pipelines/paint_by_example/pipeline_paint_by_example.py @@ -16,11 +16,11 @@ from typing import Callable, List, Optional, Union import numpy as np +import PIL import torch +from transformers import CLIPFeatureExtractor -import PIL from diffusers.utils import is_accelerate_available -from transformers import CLIPFeatureExtractor from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler diff --git a/src/diffusers/pipelines/pipeline_flax_utils.py b/src/diffusers/pipelines/pipeline_flax_utils.py index 1922d0fad6e5..7a22248bf168 100644 --- a/src/diffusers/pipelines/pipeline_flax_utils.py +++ b/src/diffusers/pipelines/pipeline_flax_utils.py @@ -19,9 +19,8 @@ import os from typing import Any, Dict, List, Optional, Union -import numpy as np - import flax +import numpy as np import PIL from flax.core.frozen_dict import FrozenDict from huggingface_hub import snapshot_download diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py index 1801fb1a2869..b6cf92abfcdf 100644 --- a/src/diffusers/pipelines/pipeline_utils.py +++ b/src/diffusers/pipelines/pipeline_utils.py @@ -22,15 +22,15 @@ from typing import Any, Callable, Dict, List, Optional, Union import numpy as np -import torch - -import diffusers import PIL +import torch from huggingface_hub import model_info, snapshot_download from packaging import version from PIL import Image from tqdm.auto import tqdm +import diffusers + from ..configuration_utils import ConfigMixin from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT from ..schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME diff --git a/src/diffusers/pipelines/pndm/__init__.py b/src/diffusers/pipelines/pndm/__init__.py index 
6fc46aaab9fa..488eb4f5f2b2 100644 --- a/src/diffusers/pipelines/pndm/__init__.py +++ b/src/diffusers/pipelines/pndm/__init__.py @@ -1,2 +1 @@ -# flake8: noqa from .pipeline_pndm import PNDMPipeline diff --git a/src/diffusers/pipelines/repaint/pipeline_repaint.py b/src/diffusers/pipelines/repaint/pipeline_repaint.py index a3298e72399a..5cd77241f51d 100644 --- a/src/diffusers/pipelines/repaint/pipeline_repaint.py +++ b/src/diffusers/pipelines/repaint/pipeline_repaint.py @@ -16,9 +16,8 @@ from typing import List, Optional, Tuple, Union import numpy as np -import torch - import PIL +import torch from ...models import UNet2DModel from ...schedulers import RePaintScheduler diff --git a/src/diffusers/pipelines/score_sde_ve/__init__.py b/src/diffusers/pipelines/score_sde_ve/__init__.py index 000d61f6e9b1..c7c2a85c067b 100644 --- a/src/diffusers/pipelines/score_sde_ve/__init__.py +++ b/src/diffusers/pipelines/score_sde_ve/__init__.py @@ -1,2 +1 @@ -# flake8: noqa from .pipeline_score_sde_ve import ScoreSdeVePipeline diff --git a/src/diffusers/pipelines/stable_diffusion/__init__.py b/src/diffusers/pipelines/stable_diffusion/__init__.py index 9ce3662c82b1..bf07127cde5b 100644 --- a/src/diffusers/pipelines/stable_diffusion/__init__.py +++ b/src/diffusers/pipelines/stable_diffusion/__init__.py @@ -2,7 +2,6 @@ from typing import List, Optional, Union import numpy as np - import PIL from PIL import Image diff --git a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py index 45788a8e2954..338c1d3584d9 100644 --- a/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +++ b/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py @@ -18,9 +18,10 @@ import re import tempfile +import requests import torch +from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig -import requests from diffusers import ( AutoencoderKL, DDIMScheduler, @@ -37,7 +38,6 @@ from diffusers.pipelines.latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder, PaintByExamplePipeline from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker -from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig from ...utils import is_omegaconf_available, is_safetensors_available from ...utils.import_utils import BACKENDS_MAPPING diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py index d7a3a41e6ab0..703bfc537341 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py @@ -16,13 +16,13 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL -from diffusers.utils import is_accelerate_available +import torch from packaging import version from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer +from diffusers.utils import is_accelerate_available + from ...configuration_utils import FrozenDict from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import DDIMScheduler diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py index 901b4cbdffe8..0aed9966a97e 
100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion.py @@ -16,10 +16,9 @@ from functools import partial from typing import Dict, List, Optional, Union -import numpy as np - import jax import jax.numpy as jnp +import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py index f555b30b5a61..4144cb511067 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_img2img.py @@ -16,10 +16,9 @@ from functools import partial from typing import Dict, List, Optional, Union -import numpy as np - import jax import jax.numpy as jnp +import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py index 5a31309f1a5d..1846b244d6cd 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_flax_stable_diffusion_inpaint.py @@ -16,10 +16,9 @@ from functools import partial from typing import Dict, List, Optional, Union -import numpy as np - import jax import jax.numpy as jnp +import numpy as np from flax.core.frozen_dict import FrozenDict from flax.jax_utils import unreplicate from flax.training.common_utils import shard diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py index aacca71f6369..6cfbca8eb129 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion.py @@ -17,7 +17,6 @@ import numpy as np import torch - from transformers import CLIPFeatureExtractor, CLIPTokenizer from ...configuration_utils import FrozenDict diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py index 473d532c124c..277025857e77 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py @@ -16,9 +16,8 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL +import torch from transformers import CLIPFeatureExtractor, CLIPTokenizer from ...configuration_utils import FrozenDict diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py index 5726e21b16a9..e7e3dc0ae836 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint.py @@ -16,9 +16,8 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL 
+import torch from transformers import CLIPFeatureExtractor, CLIPTokenizer from ...configuration_utils import FrozenDict diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py index 1b4648c93543..df22652826ae 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py @@ -2,9 +2,8 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL +import torch from transformers import CLIPFeatureExtractor, CLIPTokenizer from ...configuration_utils import FrozenDict diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py index dd878dab2be2..2a4268d815a6 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py @@ -16,7 +16,6 @@ from typing import Any, Callable, Dict, List, Optional, Union import torch - from packaging import version from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py index ac51582c8f60..9d663de47ff5 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_depth2img.py @@ -17,9 +17,8 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL +import torch from packaging import version from transformers import CLIPTextModel, CLIPTokenizer, DPTFeatureExtractor, DPTForDepthEstimation diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py index 8c0733872a07..fb5d5da16688 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_image_variation.py @@ -15,9 +15,8 @@ import inspect from typing import Callable, List, Optional, Union -import torch - import PIL +import torch from packaging import version from transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py index 3f988c31dd17..e73c946133ee 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py @@ -16,9 +16,8 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL +import torch from packaging import version from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py index 7ab0438cb294..649530393909 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +++ 
b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py @@ -16,9 +16,8 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL +import torch from packaging import version from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py index 148cc2970c3a..689886c51d45 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py @@ -16,9 +16,8 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL +import torch from packaging import version from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py index 2a201dc4417b..fbb5311f5444 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py @@ -16,9 +16,8 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL +import torch from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from ...models import AutoencoderKL, UNet2DConditionModel diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py index 40aa2e9c22f3..5d4f64cc96d7 100755 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_k_diffusion.py @@ -16,7 +16,6 @@ from typing import Callable, List, Optional, Union import torch - from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser from ...pipelines import DiffusionPipeline diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py index d025f1042266..4a0ce91e96c1 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_latent_upscale.py @@ -15,10 +15,9 @@ from typing import Callable, List, Optional, Union import numpy as np +import PIL import torch import torch.nn.functional as F - -import PIL from transformers import CLIPTextModel, CLIPTokenizer from ...models import AutoencoderKL, UNet2DConditionModel diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py index c9773a8e87a4..07714d2c484e 100644 --- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py +++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py @@ -16,9 +16,8 @@ from typing import Callable, List, Optional, Union import numpy as np -import torch - import PIL +import torch from transformers import CLIPTextModel, CLIPTokenizer from ...models import AutoencoderKL, UNet2DConditionModel diff --git 
a/src/diffusers/pipelines/stable_diffusion/safety_checker.py b/src/diffusers/pipelines/stable_diffusion/safety_checker.py index 278960809211..2e20c31b6466 100644 --- a/src/diffusers/pipelines/stable_diffusion/safety_checker.py +++ b/src/diffusers/pipelines/stable_diffusion/safety_checker.py @@ -15,7 +15,6 @@ import numpy as np import torch import torch.nn as nn - from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging diff --git a/src/diffusers/pipelines/stable_diffusion_safe/__init__.py b/src/diffusers/pipelines/stable_diffusion_safe/__init__.py index 59ff61fa3b54..5aecfeac112e 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/__init__.py +++ b/src/diffusers/pipelines/stable_diffusion_safe/__init__.py @@ -3,7 +3,6 @@ from typing import List, Optional, Union import numpy as np - import PIL from PIL import Image diff --git a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py b/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py index 5ce69783ba89..e27cab7a9296 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py +++ b/src/diffusers/pipelines/stable_diffusion_safe/pipeline_stable_diffusion_safe.py @@ -4,7 +4,6 @@ import numpy as np import torch - from packaging import version from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer diff --git a/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py b/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py index f608cd17435d..6fc11b098547 100644 --- a/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py +++ b/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py @@ -14,7 +14,6 @@ import torch import torch.nn as nn - from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging diff --git a/src/diffusers/pipelines/stochastic_karras_ve/__init__.py b/src/diffusers/pipelines/stochastic_karras_ve/__init__.py index db2582043781..5a63c1d24afb 100644 --- a/src/diffusers/pipelines/stochastic_karras_ve/__init__.py +++ b/src/diffusers/pipelines/stochastic_karras_ve/__init__.py @@ -1,2 +1 @@ -# flake8: noqa from .pipeline_stochastic_karras_ve import KarrasVePipeline diff --git a/src/diffusers/pipelines/unclip/pipeline_unclip.py b/src/diffusers/pipelines/unclip/pipeline_unclip.py index b68c6626709a..5f516e76dea6 100644 --- a/src/diffusers/pipelines/unclip/pipeline_unclip.py +++ b/src/diffusers/pipelines/unclip/pipeline_unclip.py @@ -17,7 +17,6 @@ import torch from torch.nn import functional as F - from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput diff --git a/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py b/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py index c5af8f6471fb..9a76b9f1fb84 100644 --- a/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py +++ b/src/diffusers/pipelines/unclip/pipeline_unclip_image_variation.py @@ -15,10 +15,9 @@ import inspect from typing import List, Optional, Union +import PIL import torch from torch.nn import functional as F - -import PIL from transformers import ( CLIPFeatureExtractor, CLIPTextModelWithProjection, diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py index ec8be907bb7a..84e46217878b 100644 --- 
a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py +++ b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion.py @@ -1,9 +1,8 @@ import inspect from typing import Callable, List, Optional, Union -import torch - import PIL.Image +import torch from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer, CLIPVisionModel from ...models import AutoencoderKL, UNet2DConditionModel diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py index 8ed4456cede7..24ca9666e272 100644 --- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py +++ b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_dual_guided.py @@ -16,10 +16,9 @@ from typing import Callable, List, Optional, Tuple, Union import numpy as np +import PIL import torch import torch.utils.checkpoint - -import PIL from transformers import ( CLIPFeatureExtractor, CLIPTextModelWithProjection, diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py index bec338540f43..b0865915f033 100644 --- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py +++ b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_image_variation.py @@ -16,10 +16,9 @@ from typing import Callable, List, Optional, Union import numpy as np +import PIL import torch import torch.utils.checkpoint - -import PIL from transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection from ...models import AutoencoderKL, UNet2DConditionModel diff --git a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py index 5b771265c9e6..c52509c52869 100644 --- a/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +++ b/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py @@ -17,7 +17,6 @@ import torch import torch.utils.checkpoint - from transformers import CLIPFeatureExtractor, CLIPTextModelWithProjection, CLIPTokenizer from ...models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel diff --git a/src/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py b/src/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py index 5436f33000a1..a26704e60f8b 100644 --- a/src/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py +++ b/src/diffusers/pipelines/vq_diffusion/pipeline_vq_diffusion.py @@ -15,7 +15,6 @@ from typing import Callable, List, Optional, Tuple, Union import torch - from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config diff --git a/src/diffusers/schedulers/scheduling_lms_discrete.py b/src/diffusers/schedulers/scheduling_lms_discrete.py index f2c474ffe11c..88537a32df53 100644 --- a/src/diffusers/schedulers/scheduling_lms_discrete.py +++ b/src/diffusers/schedulers/scheduling_lms_discrete.py @@ -17,7 +17,6 @@ import numpy as np import torch - from scipy import integrate from ..configuration_utils import ConfigMixin, register_to_config diff --git a/src/diffusers/utils/dummy_flax_and_transformers_objects.py 
b/src/diffusers/utils/dummy_flax_and_transformers_objects.py index 4b81fb562ebb..5db4c7d58d1e 100644 --- a/src/diffusers/utils/dummy_flax_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_flax_and_transformers_objects.py @@ -1,6 +1,4 @@ # This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - from ..utils import DummyObject, requires_backends diff --git a/src/diffusers/utils/dummy_flax_objects.py b/src/diffusers/utils/dummy_flax_objects.py index 8e308bb41bea..7772c1a06b49 100644 --- a/src/diffusers/utils/dummy_flax_objects.py +++ b/src/diffusers/utils/dummy_flax_objects.py @@ -1,6 +1,4 @@ # This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - from ..utils import DummyObject, requires_backends diff --git a/src/diffusers/utils/dummy_onnx_objects.py b/src/diffusers/utils/dummy_onnx_objects.py index 963906b24c36..bde5f6ad0793 100644 --- a/src/diffusers/utils/dummy_onnx_objects.py +++ b/src/diffusers/utils/dummy_onnx_objects.py @@ -1,6 +1,4 @@ # This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - from ..utils import DummyObject, requires_backends diff --git a/src/diffusers/utils/dummy_pt_objects.py b/src/diffusers/utils/dummy_pt_objects.py index 1e7c0a46a2b2..546992bc436e 100644 --- a/src/diffusers/utils/dummy_pt_objects.py +++ b/src/diffusers/utils/dummy_pt_objects.py @@ -1,6 +1,4 @@ # This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - from ..utils import DummyObject, requires_backends diff --git a/src/diffusers/utils/dummy_torch_and_librosa_objects.py b/src/diffusers/utils/dummy_torch_and_librosa_objects.py index ff60d4c4393f..2088bc4a7441 100644 --- a/src/diffusers/utils/dummy_torch_and_librosa_objects.py +++ b/src/diffusers/utils/dummy_torch_and_librosa_objects.py @@ -1,6 +1,4 @@ # This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - from ..utils import DummyObject, requires_backends diff --git a/src/diffusers/utils/dummy_torch_and_scipy_objects.py b/src/diffusers/utils/dummy_torch_and_scipy_objects.py index 13f17349bb45..a1ff25863822 100644 --- a/src/diffusers/utils/dummy_torch_and_scipy_objects.py +++ b/src/diffusers/utils/dummy_torch_and_scipy_objects.py @@ -1,6 +1,4 @@ # This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - from ..utils import DummyObject, requires_backends diff --git a/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py index e0151e23adb7..56836f0b6d77 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_and_k_diffusion_objects.py @@ -1,6 +1,4 @@ # This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - from ..utils import DummyObject, requires_backends diff --git a/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py index ae9412a95682..204500a1f195 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_and_onnx_objects.py @@ -1,6 +1,4 @@ # This file is autogenerated by the command `make fix-copies`, do not edit. 
-# flake8: noqa - from ..utils import DummyObject, requires_backends diff --git a/src/diffusers/utils/dummy_torch_and_transformers_objects.py b/src/diffusers/utils/dummy_torch_and_transformers_objects.py index 33fc0c72a77f..79755c27e6fe 100644 --- a/src/diffusers/utils/dummy_torch_and_transformers_objects.py +++ b/src/diffusers/utils/dummy_torch_and_transformers_objects.py @@ -1,6 +1,4 @@ # This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - from ..utils import DummyObject, requires_backends diff --git a/src/diffusers/utils/logging.py b/src/diffusers/utils/logging.py index 39a021562dae..1c682c2b0454 100644 --- a/src/diffusers/utils/logging.py +++ b/src/diffusers/utils/logging.py @@ -18,14 +18,16 @@ import os import sys import threading -from logging import CRITICAL # NOQA -from logging import DEBUG # NOQA -from logging import ERROR # NOQA -from logging import FATAL # NOQA -from logging import INFO # NOQA -from logging import NOTSET # NOQA -from logging import WARN # NOQA -from logging import WARNING # NOQA +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) from typing import Optional from tqdm import auto as tqdm_lib diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py index f6bc0e92f2f4..76a470c90038 100644 --- a/src/diffusers/utils/testing_utils.py +++ b/src/diffusers/utils/testing_utils.py @@ -11,7 +11,6 @@ from typing import Optional, Union import numpy as np - import PIL.Image import PIL.ImageOps import requests diff --git a/tests/models/test_models_unet_2d_condition.py b/tests/models/test_models_unet_2d_condition.py index feb2de6e8c99..70d9d6a59881 100644 --- a/tests/models/test_models_unet_2d_condition.py +++ b/tests/models/test_models_unet_2d_condition.py @@ -18,6 +18,7 @@ import unittest import torch +from parameterized import parameterized from diffusers import UNet2DConditionModel from diffusers.models.cross_attention import CrossAttnProcessor, LoRACrossAttnProcessor @@ -31,7 +32,6 @@ torch_device, ) from diffusers.utils.import_utils import is_xformers_available -from parameterized import parameterized from ..test_modeling_common import ModelTesterMixin diff --git a/tests/models/test_models_unet_2d_flax.py b/tests/models/test_models_unet_2d_flax.py index 4b279d2f3386..69a0704dca9d 100644 --- a/tests/models/test_models_unet_2d_flax.py +++ b/tests/models/test_models_unet_2d_flax.py @@ -1,10 +1,11 @@ import gc import unittest +from parameterized import parameterized + from diffusers import FlaxUNet2DConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow -from parameterized import parameterized if is_flax_available(): diff --git a/tests/models/test_models_vae.py b/tests/models/test_models_vae.py index c0181ef03c5c..1cf4bc3c446c 100644 --- a/tests/models/test_models_vae.py +++ b/tests/models/test_models_vae.py @@ -17,11 +17,11 @@ import unittest import torch +from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.models import ModelMixin from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device -from parameterized import parameterized from ..test_modeling_common import ModelTesterMixin diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion.py b/tests/pipelines/altdiffusion/test_alt_diffusion.py index c4ea6dd1a33b..1740e9af382f 
100644 --- a/tests/pipelines/altdiffusion/test_alt_diffusion.py +++ b/tests/pipelines/altdiffusion/test_alt_diffusion.py @@ -18,6 +18,7 @@ import numpy as np import torch +from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( @@ -26,7 +27,6 @@ ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py index baf86d07eab2..ae7d70f92473 100644 --- a/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py +++ b/tests/pipelines/altdiffusion/test_alt_diffusion_img2img.py @@ -19,6 +19,7 @@ import numpy as np import torch +from transformers import XLMRobertaTokenizer from diffusers import AltDiffusionImg2ImgPipeline, AutoencoderKL, PNDMScheduler, UNet2DConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( @@ -27,7 +28,6 @@ ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from transformers import XLMRobertaTokenizer torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion.py b/tests/pipelines/latent_diffusion/test_latent_diffusion.py index 244987f89819..24d75068af29 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion.py @@ -18,10 +18,10 @@ import numpy as np import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, slow, torch_device -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py index 4f519f842b6d..bc7d2d4cd78f 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py @@ -17,10 +17,10 @@ import numpy as np import torch +from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel from diffusers.utils.testing_utils import require_torch, slow, torch_device -from transformers import CLIPTextConfig, CLIPTextModel torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/paint_by_example/test_paint_by_example.py b/tests/pipelines/paint_by_example/test_paint_by_example.py index d02b7702d9e1..a2e04d20a067 100644 --- a/tests/pipelines/paint_by_example/test_paint_by_example.py +++ b/tests/pipelines/paint_by_example/test_paint_by_example.py @@ -19,13 +19,13 @@ import numpy as np import torch +from PIL import Image +from transformers import CLIPImageProcessor, CLIPVisionConfig from diffusers import AutoencoderKL, PaintByExamplePipeline, PNDMScheduler, UNet2DConditionModel from diffusers.pipelines.paint_by_example import 
PaintByExampleImageEncoder from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from PIL import Image -from transformers import CLIPImageProcessor, CLIPVisionConfig from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py index 5c088f191773..948a39786dcb 100644 --- a/tests/pipelines/stable_diffusion/test_cycle_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_cycle_diffusion.py @@ -19,11 +19,11 @@ import numpy as np import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/tests/pipelines/stable_diffusion/test_stable_diffusion.py index e78ae1c12518..02774d69dc29 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -21,6 +21,7 @@ import numpy as np import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -36,7 +37,6 @@ ) from diffusers.utils import load_numpy, nightly, slow, torch_device from diffusers.utils.testing_utils import CaptureLogger, require_torch_gpu -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from ...models.test_models_unet_2d_condition import create_lora_layers from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py index a8e8b6d36866..a7aa4051774d 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py @@ -19,6 +19,8 @@ import numpy as np import torch +from PIL import Image +from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection from diffusers import ( AutoencoderKL, @@ -29,8 +31,6 @@ ) from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from PIL import Image -from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py index 5ded9188044f..b162fe3ac610 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py @@ -19,6 +19,7 @@ import numpy as np import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -31,7 +32,6 @@ ) from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from transformers import CLIPTextConfig, CLIPTextModel, 
CLIPTokenizer from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py index ecf65e5a14ce..c44101d13c5a 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py @@ -19,6 +19,8 @@ import numpy as np import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -31,8 +33,6 @@ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from PIL import Image -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py index 5a0957260391..d330e0f7eded 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py @@ -19,6 +19,8 @@ import numpy as np import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -33,8 +35,6 @@ ) from diffusers.utils import floats_tensor, load_image, nightly, slow, torch_device from diffusers.utils.testing_utils import load_numpy, require_torch_gpu -from PIL import Image -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer torch.backends.cuda.matmul.allow_tf32 = False diff --git a/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py b/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py index 60131107a239..4c232b573b4f 100644 --- a/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py +++ b/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py @@ -19,6 +19,8 @@ import numpy as np import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -31,8 +33,6 @@ ) from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from PIL import Image -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py index b49aa5bc33a4..6db22626c9e0 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py @@ -18,6 +18,7 @@ import numpy as np import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -33,7 +34,6 @@ ) from diffusers.utils import load_numpy, nightly, slow, torch_device from diffusers.utils.testing_utils import CaptureLogger, require_torch_gpu -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from ...test_pipelines_common import PipelineTesterMixin diff --git 
a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py index b93184fa0985..09334afcdaaf 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py @@ -20,6 +20,15 @@ import numpy as np import torch +from PIL import Image +from transformers import ( + CLIPTextConfig, + CLIPTextModel, + CLIPTokenizer, + DPTConfig, + DPTFeatureExtractor, + DPTForDepthEstimation, +) from diffusers import ( AutoencoderKL, @@ -33,15 +42,6 @@ from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device from diffusers.utils.import_utils import is_accelerate_available from diffusers.utils.testing_utils import require_torch_gpu -from PIL import Image -from transformers import ( - CLIPTextConfig, - CLIPTextModel, - CLIPTokenizer, - DPTConfig, - DPTFeatureExtractor, - DPTForDepthEstimation, -) from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py index 096c083bef6c..0ec47a510b50 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py @@ -19,12 +19,12 @@ import numpy as np import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import require_torch_gpu, slow -from PIL import Image -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py index 21c9b8b5ccce..a6e3f9e65042 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py @@ -19,6 +19,7 @@ import numpy as np import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, @@ -29,7 +30,6 @@ ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from ...test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py index 9e70f1e55c4a..ff0112b3263b 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py @@ -19,12 +19,12 @@ import numpy as np import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu -from PIL import Image -from transformers import 
CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 torch.backends.cuda.matmul.allow_tf32 = False
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
index ea26b024f2e4..a06f13632a3a 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
@@ -19,6 +19,7 @@
 
 import numpy as np
 import torch
+from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 from diffusers import (
     AutoencoderKL,
@@ -30,7 +31,6 @@
 )
 from diffusers.utils import load_numpy, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
-from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 torch.backends.cuda.matmul.allow_tf32 = False
diff --git a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
index 24e5d4e0e8b2..31f6e1972f7f 100644
--- a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
+++ b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
@@ -20,12 +20,12 @@
 
 import numpy as np
 import torch
+from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
 from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
 from diffusers.utils import floats_tensor, nightly, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
-from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 torch.backends.cuda.matmul.allow_tf32 = False
diff --git a/tests/pipelines/unclip/test_unclip.py b/tests/pipelines/unclip/test_unclip.py
index 06ad4203f020..c065ee7ea0f4 100644
--- a/tests/pipelines/unclip/test_unclip.py
+++ b/tests/pipelines/unclip/test_unclip.py
@@ -18,12 +18,12 @@
 
 import numpy as np
 import torch
+from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
 
 from diffusers import PriorTransformer, UnCLIPPipeline, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel
 from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
 from diffusers.utils import load_numpy, nightly, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
-from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
 
 from ...test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
diff --git a/tests/pipelines/unclip/test_unclip_image_variation.py b/tests/pipelines/unclip/test_unclip_image_variation.py
index 804ef7f7c387..e320e48d198a 100644
--- a/tests/pipelines/unclip/test_unclip_image_variation.py
+++ b/tests/pipelines/unclip/test_unclip_image_variation.py
@@ -19,6 +19,14 @@
 
 import numpy as np
 import torch
+from transformers import (
+    CLIPImageProcessor,
+    CLIPTextConfig,
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    CLIPVisionConfig,
+    CLIPVisionModelWithProjection,
+)
 
 from diffusers import (
     DiffusionPipeline,
@@ -30,14 +38,6 @@
 from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
 from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
 from diffusers.utils.testing_utils import load_image, require_torch_gpu
-from transformers import (
-    CLIPImageProcessor,
-    CLIPTextConfig,
-    CLIPTextModelWithProjection,
-    CLIPTokenizer,
-    CLIPVisionConfig,
-    CLIPVisionModelWithProjection,
-)
 
 from ...test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
diff --git a/tests/pipelines/vq_diffusion/test_vq_diffusion.py b/tests/pipelines/vq_diffusion/test_vq_diffusion.py
index 0176ec0b5880..9e9468ab2ab3 100644
--- a/tests/pipelines/vq_diffusion/test_vq_diffusion.py
+++ b/tests/pipelines/vq_diffusion/test_vq_diffusion.py
@@ -18,12 +18,12 @@
 
 import numpy as np
 import torch
+from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
 from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
 from diffusers.utils import load_numpy, slow, torch_device
 from diffusers.utils.testing_utils import require_torch_gpu
-from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 torch.backends.cuda.matmul.allow_tf32 = False
diff --git a/tests/repo_utils/test_check_dummies.py b/tests/repo_utils/test_check_dummies.py
index d8fa9ce10547..f233b76d6f81 100644
--- a/tests/repo_utils/test_check_dummies.py
+++ b/tests/repo_utils/test_check_dummies.py
@@ -20,7 +20,7 @@
 git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
 sys.path.append(os.path.join(git_repo_path, "utils"))
 
-import check_dummies
+import check_dummies  # noqa: E402
 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402
@@ -94,8 +94,6 @@ def from_pretrained(cls, *args, **kwargs):
 
     def test_create_dummy_files(self):
         expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
-# flake8: noqa
-
 from ..utils import DummyObject, requires_backends
diff --git a/tests/test_outputs.py b/tests/test_outputs.py
index 3c3054c885a1..50cbd1d54ee4 100644
--- a/tests/test_outputs.py
+++ b/tests/test_outputs.py
@@ -3,8 +3,8 @@
 from typing import List, Union
 
 import numpy as np
-
 import PIL.Image
+
 from diffusers.utils.outputs import BaseOutput
diff --git a/tests/test_pipelines.py b/tests/test_pipelines.py
index d6b080797292..71340d43b0a9 100644
--- a/tests/test_pipelines.py
+++ b/tests/test_pipelines.py
@@ -23,10 +23,13 @@
 import unittest
 
 import numpy as np
-import torch
-
 import PIL
 import safetensors.torch
+import torch
+from parameterized import parameterized
+from PIL import Image
+from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
+
 from diffusers import (
     AutoencoderKL,
     DDIMPipeline,
@@ -49,9 +52,6 @@
 from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
 from diffusers.utils import CONFIG_NAME, WEIGHTS_NAME, floats_tensor, is_flax_available, nightly, slow, torch_device
 from diffusers.utils.testing_utils import CaptureLogger, get_tests_dir, require_torch_gpu
-from parameterized import parameterized
-from PIL import Image
-from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 torch.backends.cuda.matmul.allow_tf32 = False
diff --git a/tests/test_pipelines_flax.py b/tests/test_pipelines_flax.py
index 9b9dcddd6060..4f0053537739 100644
--- a/tests/test_pipelines_flax.py
+++ b/tests/test_pipelines_flax.py
@@ -26,11 +26,12 @@
 if is_flax_available():
     import jax
     import jax.numpy as jnp
-    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
     from flax.jax_utils import replicate
     from flax.training.common_utils import shard
     from jax import pmap
 
+    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
+
 
 @require_flax
 class DownloadTests(unittest.TestCase):
diff --git a/utils/check_dummies.py b/utils/check_dummies.py
index 38fccca1c95f..e0cea048cf15 100644
--- a/utils/check_dummies.py
+++ b/utils/check_dummies.py
@@ -119,7 +119,6 @@ def create_dummy_files(backend_specific_objects=None):
     for backend, objects in backend_specific_objects.items():
         backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
         dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
-        dummy_file += "# flake8: noqa\n\n"
         dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
         dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
         dummy_files[backend] = dummy_file
diff --git a/utils/check_repo.py b/utils/check_repo.py
index c495c9ded94b..977203a0043a 100644
--- a/utils/check_repo.py
+++ b/utils/check_repo.py
@@ -342,7 +342,7 @@ def get_model_test_files():
             path = os.path.join(target_dir, file_or_dir)
             if os.path.isfile(path):
                 filename = os.path.split(path)[-1]
-                if "test_modeling" in filename and not os.path.splitext(filename)[0] in _ignore_files:
+                if "test_modeling" in filename and os.path.splitext(filename)[0] not in _ignore_files:
                     file = os.path.join(*path.split(os.sep)[1:])
                     test_files.append(file)