diff --git a/.github/workflows/build_one.yml b/.github/workflows/build_one.yml index 44b6a887e..f09116578 100644 --- a/.github/workflows/build_one.yml +++ b/.github/workflows/build_one.yml @@ -118,7 +118,7 @@ jobs: git push --force "https://pyviz-developers:${{ secrets.GITHUB_TOKEN }}@github.com/holoviz-topics/examples.git" HEAD:$BRANCHNAME git checkout ${{ github.ref_name }} - name: clean up - run: doit clean --clean-dep build:${{ inputs.project }} + run: doit clean --clean-dep "build:${{ inputs.project }}" - name: git diff run: git diff - name: check clean up diff --git a/stable_diffusion/anaconda-project-lock.yml b/stable_diffusion/anaconda-project-lock.yml new file mode 100644 index 000000000..922763968 --- /dev/null +++ b/stable_diffusion/anaconda-project-lock.yml @@ -0,0 +1,360 @@ +# This is an Anaconda project lock file. +# The lock file locks down exact versions of all your dependencies. +# +# In most cases, this file is automatically maintained by the `anaconda-project` command or GUI tools. +# It's best to keep this file in revision control (such as git or svn). +# The file is in YAML format, please see http://www.yaml.org/start.html for more. +# + +# +# Set to false to ignore locked versions. +# +locking_enabled: true + +# +# A key goes in here for each env spec. +# +env_specs: + default: + locked: false + stable-diffusion-m1: + locked: true + env_spec_hash: 467062e441ee3e7b7d3f0f3a0ce239b6223dc117 + platforms: + - linux-64 + - osx-arm64 + packages: + unix: + - accelerate=0.16.0=pyhd8ed1ab_0 + - aiosignal=1.3.1=pyhd8ed1ab_0 + - anyio=3.6.2=pyhd8ed1ab_0 + - appnope=0.1.3=pyhd8ed1ab_0 + - argon2-cffi=21.3.0=pyhd8ed1ab_0 + - asttokens=2.2.1=pyhd8ed1ab_0 + - async-timeout=4.0.2=pyhd8ed1ab_0 + - attrs=22.2.0=pyh71513ae_0 + - backcall=0.2.0=pyh9f0ad1d_0 + - backports.functools_lru_cache=1.6.4=pyhd8ed1ab_0 + - backports=1.0=pyhd8ed1ab_3 + - beautifulsoup4=4.11.2=pyha770c72_0 + - bleach=6.0.0=pyhd8ed1ab_0 + - bokeh=2.4.3=pyhd8ed1ab_3 + - certifi=2022.12.7=pyhd8ed1ab_0 + - charset-normalizer=2.1.1=pyhd8ed1ab_0 + - click=8.1.3=unix_pyhd8ed1ab_2 + - colorama=0.4.6=pyhd8ed1ab_0 + - comm=0.1.2=pyhd8ed1ab_0 + - dataclasses=0.8=pyhc8e2a94_3 + - datasets=2.10.0=pyhd8ed1ab_0 + - decorator=5.1.1=pyhd8ed1ab_0 + - defusedxml=0.7.1=pyhd8ed1ab_0 + - diffusers=0.13.1=pyhd8ed1ab_0 + - dill=0.3.6=pyhd8ed1ab_1 + - entrypoints=0.4=pyhd8ed1ab_0 + - executing=1.2.0=pyhd8ed1ab_0 + - filelock=3.9.0=pyhd8ed1ab_0 + - flit-core=3.8.0=pyhd8ed1ab_0 + - fsspec=2023.1.0=pyhd8ed1ab_0 + - ftfy=6.1.1=pyhd8ed1ab_0 + - huggingface_hub=0.12.1=pyhd8ed1ab_0 + - idna=3.4=pyhd8ed1ab_0 + - importlib-metadata=6.0.0=pyha770c72_0 + - importlib_metadata=6.0.0=hd8ed1ab_0 + - importlib_resources=5.12.0=pyhd8ed1ab_0 + - ipykernel=6.21.2=pyh736e0ef_0 + - ipython=8.11.0=pyhd1c38e8_0 + - ipython_genutils=0.2.0=py_1 + - jedi=0.18.2=pyhd8ed1ab_0 + - jinja2=3.1.2=pyhd8ed1ab_1 + - joblib=1.2.0=pyhd8ed1ab_0 + - jsonschema=4.17.3=pyhd8ed1ab_0 + - jupyter_client=8.0.3=pyhd8ed1ab_0 + - jupyter_events=0.6.3=pyhd8ed1ab_0 + - jupyter_server=2.3.0=pyhd8ed1ab_0 + - jupyter_server_terminals=0.4.4=pyhd8ed1ab_1 + - jupyterlab_pygments=0.2.2=pyhd8ed1ab_0 + - markdown=3.4.1=pyhd8ed1ab_0 + - matplotlib-inline=0.1.6=pyhd8ed1ab_0 + - mistune=2.0.5=pyhd8ed1ab_0 + - nbclassic=0.5.2=pyhd8ed1ab_0 + - nbclient=0.7.2=pyhd8ed1ab_0 + - nbconvert-core=7.2.9=pyhd8ed1ab_0 + - nbconvert-pandoc=7.2.9=pyhd8ed1ab_0 + - nbconvert=7.2.9=pyhd8ed1ab_0 + - nbformat=5.7.3=pyhd8ed1ab_0 + - nest-asyncio=1.5.6=pyhd8ed1ab_0 + - notebook-shim=0.2.2=pyhd8ed1ab_0 + - 
notebook=6.5.2=pyha770c72_1 + - packaging=23.0=pyhd8ed1ab_0 + - pandocfilters=1.5.0=pyhd8ed1ab_0 + - panel=0.14.3=py_0 + - param=1.12.3=py_0 + - parquet-cpp=1.5.1=2 + - parso=0.8.3=pyhd8ed1ab_0 + - pexpect=4.8.0=pyh1a96a4e_2 + - pickleshare=0.7.5=py_1003 + - pip=23.0.1=pyhd8ed1ab_0 + - pkgutil-resolve-name=1.3.10=pyhd8ed1ab_0 + - platformdirs=3.0.0=pyhd8ed1ab_0 + - prometheus_client=0.16.0=pyhd8ed1ab_0 + - prompt-toolkit=3.0.38=pyha770c72_0 + - prompt_toolkit=3.0.38=hd8ed1ab_0 + - ptyprocess=0.7.0=pyhd3deb0d_0 + - pure_eval=0.2.2=pyhd8ed1ab_0 + - pycparser=2.21=pyhd8ed1ab_0 + - pyct-core=0.5.0=py_0 + - pyct=0.5.0=py_0 + - pygments=2.14.0=pyhd8ed1ab_0 + - pyopenssl=23.0.0=pyhd8ed1ab_0 + - pysocks=1.7.1=pyha2e5f31_6 + - python-dateutil=2.8.2=pyhd8ed1ab_0 + - python-fastjsonschema=2.16.3=pyhd8ed1ab_0 + - python-json-logger=2.0.7=pyhd8ed1ab_0 + - python_abi=3.10=3_cp310 + - pytz=2022.7.1=pyhd8ed1ab_0 + - pyviz_comms=2.2.1=py_0 + - requests=2.28.2=pyhd8ed1ab_0 + - responses=0.18.0=pyhd8ed1ab_0 + - rfc3339-validator=0.1.4=pyhd8ed1ab_0 + - rfc3986-validator=0.1.1=pyh9f0ad1d_0 + - sacremoses=0.0.53=pyhd8ed1ab_0 + - send2trash=1.8.0=pyhd8ed1ab_0 + - setuptools=67.4.0=pyhd8ed1ab_0 + - six=1.16.0=pyh6c4a22f_0 + - sniffio=1.3.0=pyhd8ed1ab_0 + - soupsieve=2.3.2.post1=pyhd8ed1ab_0 + - stack_data=0.6.2=pyhd8ed1ab_0 + - terminado=0.17.1=pyhd1c38e8_0 + - tinycss2=1.2.1=pyhd8ed1ab_0 + - tqdm=4.64.1=pyhd8ed1ab_0 + - traitlets=5.9.0=pyhd8ed1ab_0 + - transformers=4.26.1=pyhd8ed1ab_0 + - typing-extensions=4.4.0=hd8ed1ab_0 + - typing_extensions=4.4.0=pyha770c72_0 + - tzdata=2022g=h191b570_0 + - urllib3=1.26.14=pyhd8ed1ab_0 + - wcwidth=0.2.6=pyhd8ed1ab_0 + - webencodings=0.5.1=py_1 + - websocket-client=1.5.1=pyhd8ed1ab_0 + - wheel=0.38.4=pyhd8ed1ab_0 + - zipp=3.15.0=pyhd8ed1ab_0 + linux-64: + - _libgcc_mutex=0.1=conda_forge + - _openmp_mutex=4.5=2_gnu + - aiohttp=3.8.4=py310h1fa729e_0 + - argon2-cffi-bindings=21.2.0=py310h5764c6d_3 + - arrow-cpp=11.0.0=ha770c72_5_cpu + - aws-c-auth=0.6.24=h84a1944_5 + - aws-c-cal=0.5.20=hc60faf5_6 + - aws-c-common=0.8.11=h0b41bf4_0 + - aws-c-compression=0.2.16=h034cb4b_3 + - aws-c-event-stream=0.2.18=h75388cd_6 + - aws-c-http=0.7.4=hf084cc8_2 + - aws-c-io=0.13.17=h10df833_2 + - aws-c-mqtt=0.8.6=hc41645a_6 + - aws-c-s3=0.2.4=h1b8f470_3 + - aws-c-sdkutils=0.1.7=h034cb4b_3 + - aws-checksums=0.1.14=h034cb4b_3 + - aws-crt-cpp=0.19.7=h0073717_7 + - aws-sdk-cpp=1.10.57=h4707e7a_4 + - blas=2.16=mkl + - brotlipy=0.7.0=py310h5764c6d_1005 + - bzip2=1.0.8=h7f98852_4 + - c-ares=1.18.1=h7f98852_0 + - ca-certificates=2022.12.7=ha878542_0 + - cffi=1.15.1=py310h255011f_3 + - cryptography=39.0.1=py310h34c0648_0 + - debugpy=1.6.6=py310heca2aa9_0 + - freetype=2.12.1=hca18f0e_1 + - frozenlist=1.3.3=py310h5764c6d_0 + - gflags=2.2.2=he1b5a44_1004 + - glog=0.6.0=h6f12383_0 + - intel-openmp=2023.0.0=h9e868ea_25371 + - jpeg=9e=h0b41bf4_3 + - jupyter_core=5.2.0=py310hff52083_0 + - keyutils=1.6.1=h166bdaf_0 + - krb5=1.20.1=h81ceb04_0 + - lcms2=2.14=hfd0df8a_1 + - ld_impl_linux-64=2.40=h41732ed_0 + - lerc=4.0.0=h27087fc_0 + - libabseil=20220623.0=cxx17_h05df665_6 + - libarrow=11.0.0=h2ebd325_5_cpu + - libblas=3.8.0=16_mkl + - libbrotlicommon=1.0.9=h166bdaf_8 + - libbrotlidec=1.0.9=h166bdaf_8 + - libbrotlienc=1.0.9=h166bdaf_8 + - libcblas=3.8.0=16_mkl + - libcrc32c=1.1.2=h9c3ff4c_0 + - libcurl=7.88.1=hdc1c0ab_0 + - libdeflate=1.17=h0b41bf4_0 + - libedit=3.1.20191231=he28a2e2_2 + - libev=4.33=h516909a_1 + - libevent=2.1.10=h28343ad_4 + - libffi=3.4.2=h7f98852_5 + - libgcc-ng=12.2.0=h65d4601_19 + - 
libgfortran-ng=7.5.0=h14aa051_20 + - libgfortran4=7.5.0=h14aa051_20 + - libgomp=12.2.0=h65d4601_19 + - libgoogle-cloud=2.7.0=h21dfe5b_1 + - libgrpc=1.51.1=h4fad500_1 + - liblapack=3.8.0=16_mkl + - liblapacke=3.8.0=16_mkl + - libnghttp2=1.51.0=hff17c54_0 + - libnsl=2.0.0=h7f98852_0 + - libpng=1.6.39=h753d276_0 + - libprotobuf=3.21.12=h3eb15da_0 + - libsodium=1.0.18=h36c2ea0_1 + - libsqlite=3.40.0=h753d276_0 + - libssh2=1.10.0=hf14f497_3 + - libstdcxx-ng=12.2.0=h46fd767_19 + - libthrift=0.18.0=h5e4af38_0 + - libtiff=4.5.0=h6adf6a1_2 + - libutf8proc=2.8.0=h166bdaf_0 + - libuuid=2.32.1=h7f98852_1000 + - libwebp-base=1.2.4=h166bdaf_0 + - libxcb=1.13=h7f98852_1004 + - libzlib=1.2.13=h166bdaf_4 + - lz4-c=1.9.4=hcb278e6_0 + - markupsafe=2.1.2=py310h1fa729e_0 + - mkl=2020.2=256 + - multidict=6.0.4=py310h1fa729e_0 + - multiprocess=0.70.14=py310h5764c6d_3 + - ncurses=6.3=h27087fc_1 + - numpy=1.22.4=py310h4ef5377_0 + - openjpeg=2.5.0=hfec8fc6_2 + - openssl=3.0.8=h0b41bf4_0 + - orc=1.8.2=hfdbbad2_2 + - pandas=1.5.3=py310h9b08913_0 + - pandoc=2.19.2=h32600fe_1 + - pillow=9.4.0=py310h023d228_1 + - psutil=5.9.4=py310h5764c6d_0 + - pthread-stubs=0.4=h36c2ea0_1001 + - pyarrow=11.0.0=py310h633f555_5_cpu + - pyrsistent=0.19.3=py310h1fa729e_0 + - python-xxhash=3.2.0=py310h1fa729e_0 + - python=3.10.9=he550d4f_0_cpython + - pytorch-mutex=1.0=cpu + - pytorch=1.13.1=py3.10_cpu_0 + - pyyaml=6.0=py310h5764c6d_5 + - pyzmq=25.0.0=py310h059b190_0 + - re2=2023.02.01=hcb278e6_0 + - readline=8.1.2=h0f457ee_0 + - regex=2022.10.31=py310h5764c6d_0 + - s2n=1.3.37=h3358134_0 + - snappy=1.1.9=hbd366e4_2 + - tk=8.6.12=h27826a3_0 + - tokenizers=0.13.2=py310he1f1126_0 + - tornado=6.2=py310h5764c6d_1 + - xorg-libxau=1.0.9=h7f98852_0 + - xorg-libxdmcp=1.1.3=h7f98852_0 + - xxhash=0.8.1=h0b41bf4_0 + - xz=5.2.6=h166bdaf_0 + - yaml=0.2.5=h7f98852_2 + - yarl=1.8.2=py310h5764c6d_0 + - zeromq=4.3.4=h9c3ff4c_1 + - zlib=1.2.13=h166bdaf_4 + - zstd=1.5.2=h3eb15da_6 + osx-arm64: + - aiohttp=3.8.4=py310h8e9501a_0 + - argon2-cffi-bindings=21.2.0=py310h8e9501a_3 + - arrow-cpp=11.0.0=hce30654_5_cpu + - aws-c-auth=0.6.24=he8f13b4_5 + - aws-c-cal=0.5.20=h9571af1_6 + - aws-c-common=0.8.11=h1a8c8d9_0 + - aws-c-compression=0.2.16=h7334ab6_3 + - aws-c-event-stream=0.2.18=ha663d55_6 + - aws-c-http=0.7.4=h49dec38_2 + - aws-c-io=0.13.17=h323b671_2 + - aws-c-mqtt=0.8.6=hdc0f556_6 + - aws-c-s3=0.2.4=hbb4c6b3_3 + - aws-c-sdkutils=0.1.7=h7334ab6_3 + - aws-checksums=0.1.14=h7334ab6_3 + - aws-crt-cpp=0.19.7=h6f6c549_7 + - aws-sdk-cpp=1.10.57=hbe10753_4 + - brotlipy=0.7.0=py310h8e9501a_1005 + - bzip2=1.0.8=h3422bc3_4 + - c-ares=1.18.1=h3422bc3_0 + - ca-certificates=2022.12.7=h4653dfc_0 + - cffi=1.15.1=py310h2399d43_3 + - cryptography=39.0.1=py310hfc83b78_0 + - debugpy=1.6.6=py310h0f1eb42_0 + - freetype=2.12.1=hd633e50_1 + - frozenlist=1.3.3=py310h8e9501a_0 + - gflags=2.2.2=hc88da5d_1004 + - glog=0.6.0=h6da1cb0_0 + - jpeg=9e=h1a8c8d9_3 + - jupyter_core=5.2.0=py310hbe9552e_0 + - krb5=1.20.1=h69eda48_0 + - lcms2=2.14=h481adae_1 + - lerc=4.0.0=h9a09cb3_0 + - libabseil=20220623.0=cxx17_h28b99d4_6 + - libarrow=11.0.0=h0b9b5d1_5_cpu + - libblas=3.9.0=16_osxarm64_openblas + - libbrotlicommon=1.0.9=h1a8c8d9_8 + - libbrotlidec=1.0.9=h1a8c8d9_8 + - libbrotlienc=1.0.9=h1a8c8d9_8 + - libcblas=3.9.0=16_osxarm64_openblas + - libcrc32c=1.1.2=hbdafb3b_0 + - libcurl=7.88.1=h9049daf_0 + - libcxx=15.0.7=h75e25f2_0 + - libdeflate=1.17=h1a8c8d9_0 + - libedit=3.1.20191231=hc8eb9b7_2 + - libev=4.33=h642e427_1 + - libevent=2.1.10=h7673551_4 + - libffi=3.4.2=h3422bc3_5 + - 
libgfortran5=11.3.0=hdaf2cc0_28 + - libgfortran=5.0.0=11_3_0_hd922786_28 + - libgoogle-cloud=2.7.0=hcf11473_1 + - libgrpc=1.51.1=hb15be72_1 + - liblapack=3.9.0=16_osxarm64_openblas + - libnghttp2=1.51.0=hae82a92_0 + - libopenblas=0.3.21=openmp_hc731615_3 + - libpng=1.6.39=h76d750c_0 + - libprotobuf=3.21.12=hb5ab8b9_0 + - libsodium=1.0.18=h27ca646_1 + - libsqlite=3.40.0=h76d750c_0 + - libssh2=1.10.0=h7a5bd25_3 + - libthrift=0.18.0=h6635e49_0 + - libtiff=4.5.0=h5dffbdd_2 + - libutf8proc=2.8.0=h1a8c8d9_0 + - libwebp-base=1.2.4=h57fd34a_0 + - libxcb=1.13=h9b22ae9_1004 + - libzlib=1.2.13=h03a7124_4 + - llvm-openmp=15.0.7=h7cfbb63_0 + - lz4-c=1.9.4=hb7217d7_0 + - markupsafe=2.1.2=py310h8e9501a_0 + - multidict=6.0.4=py310h8e9501a_0 + - multiprocess=0.70.14=py310h8e9501a_3 + - ncurses=6.3=h07bb92c_1 + - numpy=1.24.2=py310h3d2048e_0 + - openjpeg=2.5.0=hbc2ba62_2 + - openssl=3.0.8=h03a7124_0 + - orc=1.8.2=hef0d403_2 + - pandas=1.5.3=py310h2b830bf_0 + - pandoc=2.19.2=hce30654_1 + - pillow=9.4.0=py310h5a7539a_1 + - psutil=5.9.4=py310h8e9501a_0 + - pthread-stubs=0.4=h27ca646_1001 + - pyarrow=11.0.0=py310h89f3c6b_5_cpu + - pyrsistent=0.19.3=py310h8e9501a_0 + - python-xxhash=3.2.0=py310h8e9501a_0 + - python=3.10.9=h3ba56d0_0_cpython + - pytorch=1.13.1=py3.10_0 + - pyyaml=6.0=py310h8e9501a_5 + - pyzmq=25.0.0=py310hc407298_0 + - re2=2023.02.01=hb7217d7_0 + - readline=8.1.2=h46ed386_0 + - regex=2022.10.31=py310h8e9501a_0 + - snappy=1.1.9=h17c5cce_2 + - tk=8.6.12=he1e0b03_0 + - tokenizers=0.13.2=py310he8402e3_0 + - tornado=6.2=py310h8e9501a_1 + - xorg-libxau=1.0.9=h27ca646_0 + - xorg-libxdmcp=1.1.3=h27ca646_0 + - xxhash=0.8.1=h1a8c8d9_0 + - xz=5.2.6=h57fd34a_0 + - yaml=0.2.5=h3422bc3_2 + - yarl=1.8.2=py310h8e9501a_0 + - zeromq=4.3.4=hbdafb3b_1 + - zlib=1.2.13=h03a7124_4 + - zstd=1.5.2=hf913c23_6 diff --git a/stable_diffusion/anaconda-project.yml b/stable_diffusion/anaconda-project.yml new file mode 100644 index 000000000..bafc48f7e --- /dev/null +++ b/stable_diffusion/anaconda-project.yml @@ -0,0 +1,76 @@ +# To reproduce: install 'anaconda-project', then 'anaconda-project run' +# (but see -m1 commands below if on an ARM64 Mac) + +name: stable_diffusion +description: Panel app for working with Stable Diffusion text2image models + +examples_config: + created: 2022-01-30 + maintainers: + - "sandhujasmine" + labels: + - "panel" + no_data_ingestion: true + gh_runner: "macos-latest" + +user_fields: [examples_config] + +channels: +- pyviz +- conda-forge + +packages: &pkgs +- notebook >=6.5.2 +- panel >=0.14.2 +- diffusers >=0.11.1 +- transformers >=4.24.0 +- ftfy >=6.1.1 +- accelerate >=0.15.0 + +dependencies: *pkgs + +commands: + notebook: + description: Run notebook on a linux-64 machine + notebook: stable_diffusion.ipynb + dashboard: + description: Run panel dashboard on a linux-64 machine + unix: panel serve --rest-session-info --session-history -1 stable_diffusion.ipynb --static-dirs thumbnails=./thumbnails + supports_http_options: true + notebook-m1: + description: Run notebook on an OSX-M1 machine + notebook: stable_diffusion.ipynb + env_spec: stable-diffusion-m1 + dashboard-m1: + description: Run panel dashboard on an OSX-M1 machine + unix: panel serve --rest-session-info --session-history -1 stable_diffusion.ipynb --static-dirs thumbnails=./thumbnails + supports_http_options: true + env_spec: stable-diffusion-m1 + +variables: {} + +downloads: {} +platforms: +- linux-64 + +env_specs: + default: + description: Default environment spec for running commands (linux-64) + packages: + - python >=3.11.0 + - 
pytorch >=1.13.0 + channels: + - conda-forge + - nodefaults + platforms: + - linux-64 + stable-diffusion-m1: + description: Env for osx-arm64 M1 for running app + packages: + - python>=3.10,<3.11.0a0 + - pytorch >=1.13.1 + channels: + - pytorch + - conda-forge + platforms: + - osx-arm64 diff --git a/stable_diffusion/stable_diffusion.ipynb b/stable_diffusion/stable_diffusion.ipynb new file mode 100644 index 000000000..cefbb9ba0 --- /dev/null +++ b/stable_diffusion/stable_diffusion.ipynb @@ -0,0 +1,603 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f851a655", + "metadata": {}, + "source": [ + "# Stable Diffusion with Panel UI" + ] + }, + { + "cell_type": "markdown", + "id": "459efa47", + "metadata": {}, + "source": [ + "[Stable Diffusion](https://en.wikipedia.org/wiki/Stable_Diffusion#:~:text=Stable%20Diffusion%20is%20a%20deep,guided%20by%20a%20text%20prompt) is a deep learning model released in 2022. Stable Diffusion can generate detailed, realistic images from text descriptions of what the image should contain or how it should appear. \n", + "\n", + "This example demonstrates how to use [Panel](https://panel.holoviz.org) to create a web browser application for running the [Diffusers library](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb), using pre-trained models from the runwayml and CompVis repositories. See [Diffusers on github](https://github.com/huggingface/diffusers#stable-diffusion-is-fully-compatible-with-diffusers) or the blogpost on [Stable Diffusion with Diffusers](https://huggingface.co/blog/stable_diffusion) for more details on the algorithm and the training set.\n", + "\n", + "## TL;DR\n", + "\n", + "This app should generate images in seconds on a system with a supported GPU, or in minutes on a CPU. It has been tested for deployment on osx-M1 with its integrated GPU, linux-64 with Nvidia GPUs (Quadro RTX 8000) installed, and linux-64 with only a CPU (no GPU; much slower). \n", + "\n", + "The app downloads two models from huggingface to `~/.cache/huggingface`, which take up ~ 17GB of disk space. 
You can run the code as a notebook or as a deployed dashboard/app if you first install anaconda-project and then run the appropriate command for your system:\n", + "\n", + "```\n", + "# run notebook on linux-64 system\n", + "anaconda-project run \n", + "\n", + "# run notebook on OSX-M1 system\n", + "anaconda-project run notebook-m1\n", + "\n", + "# run panel dashboard app on linux-64 system\n", + "anaconda-project run dashboard\n", + "\n", + "# run panel dashboard app on OSX-M1 system\n", + "anaconda-project run dashboard-m1\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2cebb495", + "metadata": {}, + "outputs": [], + "source": [ + "from bokeh.resources import INLINE\n", + "from bokeh.io import output_notebook\n", + "output_notebook(resources=INLINE)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fb6e06c2", + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "from contextlib import contextmanager\n", + "from collections import deque\n", + "\n", + "import torch\n", + "import random\n", + "from diffusers import StableDiffusionPipeline\n", + "\n", + "import panel as pn\n", + "pn.extension()\n", + "\n", + "@contextmanager\n", + "def exec_time(description=\"Task\"):\n", + " \"\"\"Context manager to measure execution time and print it to the console\"\"\"\n", + " st = time.perf_counter()\n", + " yield\n", + " print(f\"{description}: {time.perf_counter() - st:.2f} sec\")" + ] + }, + { + "cell_type": "markdown", + "id": "6e2e53a2", + "metadata": {}, + "source": [ + "## Invoking Stable Diffusion on a prompt\n", + "\n", + "The `init_model` function below will first look in the default cache location used by huggingface to find downloaded pretrained models. If these haven't been downloaded yet, it will first download the models. On subsequent restarts of the app, it will load the models from the local disk cache.\n", + "\n", + "
<details>\n",
+    "<summary>(Optional: how to download models manually)</summary>\n",
+    "\n",
+    "<pre>\n",
+    "  pipe, cache = StableDiffusionPipeline.from_pretrained(\"runwayml/stable-diffusion-v1-5\", return_cached_folder=True, local_files_only=False)\n",
+    "  pipe, cache = StableDiffusionPipeline.from_pretrained(\"CompVis/stable-diffusion-v1-4\",  return_cached_folder=True, local_files_only=False)\n",
+    "  print(cache) # to see the default cache location\n",
+    "</pre>\n",
+    "</details>\n",
+    "\n",
+    "In addition to caching the pretrained model on disk, we also initialize and cache the in-memory diffusers pipeline inside `panel.state.cache`. This ensures that each new visitor to the page reuses the already-loaded model rather than loading another copy into memory.\n",
+    "\n",
+    "The initial page load takes an extra ~10 sec or so (on a Quadro RTX 8000) and allocates the GPU memory required to hold the pipeline. Subsequent visitors get this pipeline from Panel's cache, so the memory overhead per visitor is then only the amount needed to generate an image from the text prompt.\n",
+    "\n",
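+    "The caching pattern is roughly the sketch below; `load_pipelines` is only a placeholder for the loading code in the next cell, which also handles device selection and downloading the models when they are not yet cached on disk:\n",
+    "\n",
+    "```python\n",
+    "import panel as pn\n",
+    "\n",
+    "if 'pipelines' in pn.state.cache:\n",
+    "    pipelines = pn.state.cache['pipelines']  # later sessions reuse the loaded pipelines\n",
+    "else:\n",
+    "    pipelines = load_pipelines()             # placeholder: see the code cell below\n",
+    "    pn.state.cache['pipelines'] = pipelines  # share across all sessions of this server\n",
+    "```\n",
+    "\n",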
+    "<details>\n",
+    "<summary>(Optional: performance details)</summary>\n",
+    "\n",
+    "[Managing memory](https://huggingface.co/docs/diffusers/optimization/fp16#memory-and-speed)\n",
+    "\n",
+    "Sample output from `nvidia-smi` with memory usage information, running on a machine with Quadro RTX 8000 GPUs, after both models load:\n",
+    "\n",
+    "<pre>\n",
+    "+-----------------------------------------------------------------------------+\n",
+    "| NVIDIA-SMI 515.65.01    Driver Version: 515.65.01    CUDA Version: 11.7     |\n",
+    "|-------------------------------+----------------------+----------------------+\n",
+    "| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n",
+    "| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n",
+    "|                               |                      |               MIG M. |\n",
+    "|===============================+======================+======================|\n",
+    "|   0  Quadro RTX 8000     Off  | 00000000:15:00.0 Off |                  Off |\n",
+    "| 33%   33C    P8    24W / 260W |     48MiB / 49152MiB |      0%      Default |\n",
+    "|                               |                      |                  N/A |\n",
+    "+-------------------------------+----------------------+----------------------+\n",
+    "|   1  Quadro RTX 8000     Off  | 00000000:2D:00.0 Off |                  Off |\n",
+    "| 33%   40C    P8    29W / 260W |   5933MiB / 49152MiB |      0%      Default |\n",
+    "|                               |                      |                  N/A |\n",
+    "+-------------------------------+----------------------+----------------------+\n",
+    "\n",
+    "+-----------------------------------------------------------------------------+\n",
+    "| Processes:                                                                  |\n",
+    "|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |\n",
+    "|        ID   ID                                                   Usage      |\n",
+    "|=============================================================================|\n",
+    "|    0   N/A  N/A      2024      G   /usr/lib/xorg/Xorg                 23MiB |\n",
+    "|    0   N/A  N/A      2545      G   /usr/bin/gnome-shell               20MiB |\n",
+    "|    1   N/A  N/A      2024      G   /usr/lib/xorg/Xorg                  4MiB |\n",
+    "|    1   N/A  N/A   2263594      C   .../diffusers/bin/python3.11     5925MiB |\n",
+    "+-----------------------------------------------------------------------------+\n",
+    "</pre>\n",
+    "\n",
+    "</details>\n",
+    "\n",
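+    "In short, the two memory-saving options used here (both applied in `init_model` below) are half-precision weights and attention slicing; a minimal sketch, assuming a CUDA GPU, looks like this:\n",
+    "\n",
+    "```python\n",
+    "import torch\n",
+    "from diffusers import StableDiffusionPipeline\n",
+    "\n",
+    "# fp16 weights roughly halve GPU memory use; attention slicing trades a bit\n",
+    "# of speed for a much smaller peak memory footprint during inference\n",
+    "pipe = StableDiffusionPipeline.from_pretrained(\n",
+    "    \"runwayml/stable-diffusion-v1-5\", torch_dtype=torch.float16)\n",
+    "pipe.to(\"cuda\")\n",
+    "pipe.enable_attention_slicing()\n",
+    "```\n",
+    "\n",
+    "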
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "537b4a42", + "metadata": {}, + "outputs": [], + "source": [ + "random_int_range = 1, int(1e6)\n", + "\n", + "def init_model(model, cuda, mps, local_files_only=True):\n", + " print(f\"Init model: {model}\")\n", + " pipe = StableDiffusionPipeline.from_pretrained(\n", + " model,\n", + " torch_dtype=torch.float16 if cuda or mps else None,\n", + " local_files_only=local_files_only\n", + " )\n", + "\n", + " # let torch choose the GPU if more than 1 is available\n", + " if cuda:\n", + " pipe.to(\"cuda\")\n", + " elif mps:\n", + " pipe.to(\"mps\")\n", + " pipe.enable_attention_slicing()\n", + " return pipe\n", + "\n", + "\n", + "if 'pipelines' in pn.state.cache:\n", + " print(\"load from cache\")\n", + " pipelines = pn.state.cache['pipelines']\n", + " pseudo_rand_gen = pn.state.cache['pseudo_rand_gen']\n", + "else:\n", + " cuda = torch.cuda.is_available()\n", + " mps = torch.backends.mps.is_available()\n", + " device = 'cuda' if cuda else 'cpu'\n", + "\n", + " models = ['runwayml/stable-diffusion-v1-5',\n", + " 'CompVis/stable-diffusion-v1-4']\n", + "\n", + " pseudo_rand_gen = torch.Generator(device=device)\n", + " with exec_time(\"Load models\"):\n", + " pipelines = dict()\n", + " for m in models:\n", + " try:\n", + " # try to load files from cache first\n", + " pipelines[m] = init_model(m, cuda, mps)\n", + " except OSError:\n", + " pipelines[m] = init_model(m, cuda, mps, local_files_only=False)\n", + "\n", + " pn.state.cache['pipelines'] = pipelines\n", + " pn.state.cache['pseudo_rand_gen'] = pseudo_rand_gen\n", + " print(\"Save to cache\")\n", + "\n", + "\n", + "default_model = next(iter(pipelines))" + ] + }, + { + "cell_type": "markdown", + "id": "2f844afb", + "metadata": {}, + "source": [ + "Now that we have a model, we can invoke it to generate an output (uncomment if needed):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fb8ff098", + "metadata": {}, + "outputs": [], + "source": [ + "# pipelines[default_model](prompt=\"Chair made from twisted vines, in a manicured garden\",\n", + "# generator=pseudo_rand_gen.manual_seed(5))[0][0]" + ] + }, + { + "cell_type": "markdown", + "id": "f5a44129", + "metadata": {}, + "source": [ + "## Cleaner interface, with parameters\n", + "\n", + "That's pretty awkward to run, so let's use [Param](https://param.holoviz.org/) to document what the user parameters are and provide a cleaner interface:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1eb91421", + "metadata": {}, + "outputs": [], + "source": [ + "import param\n", + "\n", + "class StableDiffusion(param.Parameterized):\n", + " prompt = param.String(doc=\"\"\"\n", + " Text describing the image you wish to generate\"\"\")\n", + "\n", + " negative_prompt = param.String(doc=\"\"\"\n", + " Text describing what _not_ to include in the image (for refining results)\"\"\")\n", + "\n", + " model = param.Selector(objects=list(pipelines), default=default_model, doc=\"\"\"\n", + " A pre-trained model to be used for inference\"\"\")\n", + "\n", + " _size_range = tuple(448 + i*2**6 for i in range(10))\n", + " width = param.Selector(_size_range, default=_size_range[1], doc=\"\"\"\n", + " Width (in pixels) of the images to generate\"\"\")\n", + "\n", + " height = param.Selector(_size_range, default=_size_range[1], doc=\"\"\"\n", + " Height (in pixels) of the images to generate\"\"\")\n", + "\n", + " guidance_scale = param.Number(bounds=(5, 10), softbounds=(7, 8.5), step=0.1, default=7.5, 
doc=\"\"\"\n", + " How closely the model should try to match the prompt, at the\n", + " potential expense of image quality or diversity.\n", + " Also known as CFG (Classifier-free guidance scale).\"\"\")\n", + "\n", + " num_steps = param.Integer(label='# of steps', bounds=(10, 75), default=30, doc=\"\"\"\n", + " How many denoising steps to take.\n", + " More steps takes longer but gives a more-refined image.\"\"\")\n", + "\n", + " seed = param.Integer(label='Random seed', default=1,\n", + " bounds=random_int_range, step=10, precedence=1, doc=\"\"\"\n", + " Seed controlling the noise values generated.\"\"\")\n", + "\n", + " generate = param.Event(precedence=1)\n", + "\n", + " @param.depends(\"generate\")\n", + " def __call__(self, **params):\n", + " p = param.ParamOverrides(self, params)\n", + " pipe = pipelines[p.model]\n", + "\n", + " res = pipe(num_inference_steps=p.num_steps, generator=pseudo_rand_gen.manual_seed(p.seed),\n", + " **{k:p[k] for k in ['prompt', 'negative_prompt', 'guidance_scale', 'height', 'width']})\n", + "\n", + " return res.images[0]\n", + "\n", + "\n", + "sd = StableDiffusion()" + ] + }, + { + "cell_type": "markdown", + "id": "f30c6343", + "metadata": {}, + "source": [ + "Now that we have a Parameterized object, we can invoke it to generate an output (uncomment if needed):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6094c8a", + "metadata": {}, + "outputs": [], + "source": [ + "# sd(prompt=\"Chair made from twisted vines, in a manicured garden\", seed=5, guidance_scale=8)" + ] + }, + { + "cell_type": "markdown", + "id": "d1dbfa93", + "metadata": {}, + "source": [ + "See `help(sd)` for all the options available from the Python prompt to control how this image is generated. You can try various prompts, such as:\n", + "\n", + " 1. Wildflowers on a mountain side \n", + " 2. A dream of a distant planet, with multiple moons\n", + " 3. Valley of flowers in the Himalayas\n", + " \n", + "If the results for your prompt are not what you were hoping for, you can add hints like \"yellow\" to a negative prompt to remove yellow flowers from the image from prompt 1.\n", + "\n", + "Users can set the heights and widths of the generated image as they like, but note that the models were trained on images with resolution of 512x512, and image quality degrades if deviating from that resolution. \n", + "\n", + "For a given prompt and set of parameters, the specific image generated is deterministic, with results controlled by a random seed. Stable diffusion starts with an initial noisy image, with the goal of removing Gaussian noise in each inference step in a way that makes it more likely that the text description would apply to this image. The seed value determines the specific noise values, determining which specific image is ultimately generated. \n", + "\n", + "\n", + "## Simple Panel app\n", + "\n", + "Now it's documented and ready to use from Python, but not everyone is comfortable with the command prompt, so let's make a [Panel](https://panel.holoviz.org) app to package up this functionality for anyone to use. 
The above class actually works already as a very simple Panel app generating and displaying an image determined by widgets for each parameter (uncomment if needed):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "568fdef1", + "metadata": {}, + "outputs": [], + "source": [ + "# pn.Row(sd.param, sd.__call__)" + ] + }, + { + "cell_type": "markdown", + "id": "137999dd", + "metadata": {}, + "source": [ + "## Full-featured Panel app\n", + "\n", + "The simple app works, but let's be a bit more ambitious and add a gallery, plus saving parameters to the URL so that we can easily select our favorite outputs and store them or send them as URL links. We'll also customize some of the appearance and behavior of the default widgets.\n", + "\n", + "We'll first create a little HTML-based Gallery class using Panel to hold the various images generated so far:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bea1232d", + "metadata": {}, + "outputs": [], + "source": [ + "from bokeh.models.formatters import PrintfTickFormatter\n", + "from panel.layout.base import ListLike\n", + "from panel.reactive import ReactiveHTML\n", + "from panel.viewable import Viewer, Viewable\n", + "\n", + "class Gallery(ListLike, ReactiveHTML):\n", + " \"\"\"Collection of thumbnails that, when selected, restore the associated image and its parameters\"\"\"\n", + "\n", + " objects = param.List(item_type=Viewable)\n", + " current = param.Integer(default=None)\n", + " margin = param.Integer(0)\n", + "\n", + " _template = \"\"\"\n", + "
    <div id=\"gallery\">\n",
+    "    {% for img in objects %}\n",
+    "      <div id=\"item\" onclick=\"${script('click')}\">${img}</div>\n",
+    "    {% endfor %}\n",
+    "    </div>
\n", + " \"\"\"\n", + "\n", + " _scripts = {\n", + " 'click': \"\"\"\n", + " const id = event.target.parentNode.parentNode.parentNode.id;\n", + " data.current = Number(id.split('-')[1]);\n", + " \"\"\"}" + ] + }, + { + "cell_type": "markdown", + "id": "eab04ef9", + "metadata": {}, + "source": [ + "Now let's make a more full-featured Panel application using this gallery and the above Parameterized class.\n", + "\n", + "When rendered with a template, the sidebar should ideally start out collapsed with only the `Prompt` text box visible. Opening the sidebar provides more options. \n", + "\n", + "By default this full-featured class randomizes the seed for each new image generated, but previously generated images can be reproduced if the seed value is specified along with the prompt and other parameters. To make it simple to return to specific images, the app URL is updated with the seed used to generate that image, so that returning to that URL will reproduce that specific image." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "203ea92d", + "metadata": {}, + "outputs": [], + "source": [ + "class ModelUI(Viewer):\n", + " model = param.Parameter(StableDiffusion())\n", + " gallery = param.ClassSelector(class_=Gallery, default=Gallery(min_height=100), precedence=-1)\n", + " generate_image = param.Event(precedence=1)\n", + "\n", + " def __init__(self, **params):\n", + " self.history = deque(maxlen=15)\n", + " super().__init__(**params)\n", + " self.gallery.param.watch(self._restore_history, 'current')\n", + " self._restore = False\n", + " self._image_container = pn.pane.PNG(style={'border': '1px solid black'},\n", + " height=self.model.height,\n", + " width=self.model.width)\n", + " # ensure seed always starts out being set\n", + " self.model.seed = random.randint(*self.model.param.seed.bounds)\n", + " # internal variable used to ignore repeat event on generate if prompt triggers callback\n", + " self._prompt_event = False\n", + " self._on_load()\n", + "\n", + " @contextmanager\n", + " def _toggle(self, attr: str, value: bool):\n", + " # toggle state of bool attribute inside context\n", + " # if exception raised by code inside the contextmanager, set state back to original and rethrow\n", + " init_state = getattr(self, attr)\n", + " try:\n", + " setattr(self, attr, value)\n", + " yield\n", + " setattr(self, attr, not value)\n", + " except Exception as ex:\n", + " setattr(self, attr, init_state)\n", + " raise ex\n", + "\n", + " def _update_query_params(self):\n", + " \"\"\"\n", + " Remove all params first since update_query will only update the non-default values.\n", + " If the current URL has non-default values, those will be incorrect unless it is first cleared\n", + " \"\"\"\n", + " pn.state.location.search = ''\n", + " pn.state.location.update_query(**self._url_params)\n", + "\n", + " def _update_image_container(self, image):\n", + " \"\"\"update the object and the container size\"\"\"\n", + " self._image_container.object = image\n", + " self._image_container.height = self.model.height\n", + " self._image_container.width = self.model.width\n", + "\n", + "\n", + " def _restore_history(self, event):\n", + " \"\"\"\n", + " Load image from cache and update URL to reflect parameters used to generate image.\n", + " Also update the seed in the end similar so generating another image does not\n", + " recreate the restored image from history.\n", + " \"\"\"\n", + " if event.new is None:\n", + " return\n", + " self.gallery.current = None\n", + " state, image = 
self.history[event.new]\n", + " # discard_events will not allow widgets to update\n", + " with self._toggle('_restore', value=True):\n", + " self.model.param.update(state)\n", + " self._update_image_container(image)\n", + " self._update_query_params()\n", + " # Also update the seed so `generate_image` doesn't recreate same image\n", + " self.model.seed = random.randint(*self.model.param.seed.bounds)\n", + "\n", + " @property\n", + " def _state(self):\n", + " return {k: v for k, v in self.model.param.values().items() if k != 'name'}\n", + "\n", + " @property\n", + " def _url_params(self):\n", + " # only capture state that deviates from default\n", + " state = {key: getattr(self.model, key) for key, val in self.model.param.defaults().items()\n", + " if key != 'name' and getattr(self.model, key) != val}\n", + " return state\n", + "\n", + " def _on_load(self):\n", + " if pn.state.location and pn.state.location.query_params:\n", + " self.model.param.update(pn.state.location.query_params)\n", + " self.param.trigger('generate_image')\n", + "\n", + " @param.depends('model.prompt', 'generate_image', watch=True)\n", + " def image(self):\n", + " if self._restore or not self.model.prompt:\n", + " return\n", + "\n", + " # user entered prompt, then hit generate; callback invoked on 'prompt';\n", + " # now event triggered from generate\n", + " if self._prompt_event and self.generate_image:\n", + " self._prompt_event = False\n", + " return\n", + " self._prompt_event = True if not self.generate_image else False\n", + "\n", + " with exec_time(f\"Generate {self.model.prompt}\"):\n", + " image = self.model()\n", + "\n", + " if len(self.gallery) == self.history.maxlen:\n", + " # Oldest element from history will be dropped\n", + " self.gallery.remove(self.gallery[0])\n", + "\n", + " self.gallery.append(pn.pane.PNG(image.resize((100, 100))))\n", + " # store full state in history\n", + " self.history.append((self._state, image))\n", + "\n", + " self._update_query_params()\n", + " # update seed at the end\n", + " self.model.seed = random.randint(*self.model.param.seed.bounds)\n", + " self._update_image_container(image)\n", + "\n", + " def _sidebar_widgets(self):\n", + " return pn.Param(self.model.param, widgets = {\n", + " 'height': pn.widgets.DiscreteSlider,\n", + " 'width': pn.widgets.DiscreteSlider,\n", + " 'guidance_scale': {'formatter': PrintfTickFormatter(format='%.1f')},\n", + " 'seed': pn.widgets.IntInput,\n", + " 'prompt': {'visible': False},\n", + " 'negative_prompt': {'visible': False},\n", + " 'generate': {'visible': False}})\n", + "\n", + " def _main_panel(self):\n", + " return pn.Column(pn.Row(pn.Column(self.model.param.prompt, self.model.param.negative_prompt,\n", + " sizing_mode='stretch_width'),\n", + " pn.Param(self.param.generate_image,\n", + " widgets={'generate_image': {'button_type': 'success',\n", + " 'height': 110, 'width': 30}})),\n", + " pn.Row(pn.panel(self._image_container, loading_indicator=True), self.gallery))\n", + "\n", + " def __panel__(self):\n", + " return pn.Row(\n", + " pn.Column(self._sidebar_widgets()),\n", + " pn.Column(self._main_panel(), sizing_mode='stretch_width'))\n", + "\n", + "\n", + "sdui = ModelUI(name='Stable Diffusion with Panel UI')\n", + "\n", + "sdui" + ] + }, + { + "cell_type": "markdown", + "id": "eddb6279", + "metadata": {}, + "source": [ + "The above app should work well in a notebook cell, but when we serve this as a standalone web page, it's nice to embed it in a full-page template (not shown here in the notebook for formatting reasons):" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "99e838c1", + "metadata": {}, + "outputs": [], + "source": [ + "logo_pn = \"\"\"\n", + " \"\"\"\n", + "\n", + "logo_diffusers = \"\"\"\n", + " \"\"\"\n", + "\n", + "desc = \"\"\"\n", + " The Panel library from\n", + " HoloViz\n", + " lets you make widget-controlled apps. This Panel app lets you use the\n", + " diffusers library to\n", + " generate images from pretrained diffusion models.\"\"\"\n", + "\n", + "template = pn.template.MaterialTemplate(title=sdui.name)\n", + "\n", + "template.sidebar.append(pn.Column(pn.Row(logo_diffusers, logo_pn),\n", + " pn.panel(desc, width=300, margin=(20, 5)),\n", + " sdui._sidebar_widgets()))\n", + "\n", + "template.main.append(pn.Column(sdui._main_panel(), sizing_mode='stretch_width'))\n", + "\n", + "template.servable();" + ] + }, + { + "cell_type": "markdown", + "id": "44a33899", + "metadata": {}, + "source": [ + "Now you can launch and share this app with `panel serve stable_diffusion.ipynb` or `anaconda-project run dashboard` or `anaconda-project run dashboard-m1` !" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/stable_diffusion/thumbnails/diffusers_logo.png b/stable_diffusion/thumbnails/diffusers_logo.png new file mode 100644 index 000000000..35a1803af Binary files /dev/null and b/stable_diffusion/thumbnails/diffusers_logo.png differ diff --git a/stable_diffusion/thumbnails/stable_diffusion.png b/stable_diffusion/thumbnails/stable_diffusion.png new file mode 100644 index 000000000..185ac2bb8 Binary files /dev/null and b/stable_diffusion/thumbnails/stable_diffusion.png differ