From 81a07f09a592b3ba5443aab7cf7a0ee1a5d145bb Mon Sep 17 00:00:00 2001 From: Hzfinfdu Date: Mon, 19 Aug 2024 16:31:33 +0800 Subject: [PATCH] Squashed 'TransformerLens/' changes from 9321ca0..cb5017a cb5017a Merge pull request #702 from TransformerLensOrg/dev 135adce add a demo for Patchscopes and Generation with Patching (#692) d6ab70a Improve attention masking (#699) 940863e Merge pull request #701 from TransformerLensOrg/dev e30f96b Update Gemma2 attention scale (#694) 464325a Merge pull request #700 from TransformerLensOrg/main dd537ba Merge pull request #688 from TransformerLensOrg/dev 05ed749 removed support for python 3.8 (#690) ba79a7e Added gemma-2 2b (#687) (#689) 332d8d7 Added gemma-2 2b (#687) 323967f Add support for Qwen2 (#662) 54a6210 docs: update Main_Demo.ipynb (#658) 2f658fe cleaned up notebook (#681) ded3964 added arena content as a notebook (#674) a0e45ef Adding RMSNorm to apply_ln_to_stack (#663) 7f12e27 NamesFilter can be a string (#679) bf64ede Move the HookedSAE / HookedSAETransformer warning to a less prominent part of the README (#676) b1c9381 Fix typo in `embed.py` docs (#677) cec7ed3 Merge pull request #675 from TransformerLensOrg/dev b96c1dd fix: fixing broken backward hooks change (#673) 67ed0d6 Fix attention result projection (#666) 929a34c Fix attention result projection (#666) 9872334 Updated README to have Bryce as the maintainer 97c4a14 Merge pull request #656 from TransformerLensOrg/dev 9ab6107 add tests for Attention (#639) e14e6db Added support for Gemma-2 (#650) b7028ab Mlp cleanup (#652) b155d08 Fix Out bias not being summed in attention component when using 4 bit precision (#654) e9d9cb2 Match Huggingface GPT2 implementation *exactly* (#645) daaac82 Move out pretrained weight conversions (#647) 46917c7 Fixed weight conversion (#648) 1286b0e Moved mixtral weights to another module (#646) cf17826 Add tests for ActivationCache (#643) aab8603 Match Huggingface MLP implementation exactly. (#641) eafefa9 Add tests for gated mlp (#638) 2f83ccf Add comparing-to-huggingface.ipynb. 
(#637) b8bfb53 Fix typo in Main_Demo.ipynb (#636) 3182364 Merge branch 'main' into dev f7a1809 Lock datasets version (#632) aecf804 Update README links to ARENA mech interp tutorials (#630) d31802e Encoder-Decoder (T5) support (#605) a92654f Unit tests loading from pretrained fill missing keys (#623) 4276f96 fixes more in Head_Detector_Demo.ipynb c10f4bd fixes in cell outputs 24a775a fixes in Attribution_Patching_Demo.ipynb df700e6 fixes demos pip install from unfound repos 567a627 Unit tests loading from pretrained fill missing keys (#623) 2ee51c5 Merge pull request #625 from anthonyduong9/fix-demos-pip-install-packages-from-unfound-repos 55cca8b fixes more in Head_Detector_Demo.ipynb 16ee6b0 fixes in cell outputs 7c91383 fixes in Attribution_Patching_Demo.ipynb ecbdfe0 fixes demos pip install from unfound repos 8397a73 Merge pull request #624 from joelburget/patch-4 a8ca059 Update Mixtral repo reference git-subtree-dir: TransformerLens git-subtree-split: cb5017ad0f30cde0d3ac0b0f863c27fbec964c28 --- .github/workflows/checks.yml | 4 +- README.md | 33 +- debugging/comparing-to-huggingface.ipynb | 991 +++++ demos/ARENA_Content.ipynb | 424 ++ demos/Activation_Patching_in_TL_Demo.ipynb | 2 +- demos/Attribution_Patching_Demo.ipynb | 2 +- demos/BERT.ipynb | 2 +- demos/Grokking_Demo.ipynb | 4 +- demos/Head_Detector_Demo.ipynb | 12 +- demos/Interactive_Neuroscope.ipynb | 52 +- demos/Main_Demo.ipynb | 16 +- demos/Othello_GPT.ipynb | 2 +- demos/Patchscopes_Generation_Demo.ipynb | 3776 +++++++++++++++++ demos/Qwen.ipynb | 2 +- demos/SVD_Interpreter_Demo.ipynb | 4 +- demos/Santa_Coder.ipynb | 2 +- demos/T5.ipynb | 724 ++++ demos/Tracr_to_Transformer_Lens_Demo.ipynb | 2 +- demos/test.ipynb | 78 - tests/acceptance/test_activation_cache.py | 526 ++- .../acceptance/test_hooked_encoder_decoder.py | 337 ++ tests/integration/test_attention_mask.py | 32 + tests/integration/test_cross_entropy_loss.py | 32 + tests/integration/test_hooks.py | 20 + tests/integration/test_match_huggingface.py | 45 + .../mlps/test_can_be_used_as_mlp.py | 92 + tests/unit/components/mlps/test_gated_mlp.py | 41 + tests/unit/components/mlps/test_mlp.py | 49 + tests/unit/components/mlps/test_moe.py | 21 + tests/unit/components/test_attention.py | 100 + .../test_activation_function_factory.py | 20 + tests/unit/factories/test_mlp_factory.py | 74 + .../pretrained_weight_conversions/test_neo.py | 52 + ...p.py => test_hooked_transformer_config.py} | 28 + .../test_loading_from_pretrained_utilities.py | 72 + transformer_lens/ActivationCache.py | 37 +- transformer_lens/HookedEncoderDecoder.py | 416 ++ transformer_lens/HookedTransformer.py | 42 +- transformer_lens/HookedTransformerConfig.py | 43 +- transformer_lens/__init__.py | 2 + transformer_lens/components/__init__.py | 9 +- .../components/abstract_attention.py | 207 +- transformer_lens/components/bert_block.py | 5 +- transformer_lens/components/bert_embed.py | 8 +- transformer_lens/components/embed.py | 2 +- transformer_lens/components/gated_mlp.py | 143 - .../components/grouped_query_attention.py | 38 +- transformer_lens/components/mlp.py | 81 - .../components/mlps/can_be_used_as_mlp.py | 75 + transformer_lens/components/mlps/gated_mlp.py | 73 + .../components/mlps/gated_mlp_4bit.py | 77 + transformer_lens/components/mlps/mlp.py | 49 + transformer_lens/components/mlps/moe.py | 113 + transformer_lens/components/moe.py | 65 - transformer_lens/components/t5_attention.py | 140 + transformer_lens/components/t5_block.py | 156 + .../components/transformer_block.py | 109 +- 
transformer_lens/components/unembed.py | 12 +- .../factories/activation_function_factory.py | 37 + transformer_lens/factories/mlp_factory.py | 21 + transformer_lens/hook_points.py | 4 +- transformer_lens/loading_from_pretrained.py | 1366 +----- transformer_lens/pretrained/__init__.py | 0 .../pretrained/weight_conversions/__init__.py | 20 + .../pretrained/weight_conversions/bert.py | 65 + .../pretrained/weight_conversions/bloom.py | 57 + .../pretrained/weight_conversions/coder.py | 63 + .../pretrained/weight_conversions/gemma.py | 95 + .../pretrained/weight_conversions/gpt2.py | 60 + .../pretrained/weight_conversions/gptj.py | 50 + .../pretrained/weight_conversions/llama.py | 96 + .../pretrained/weight_conversions/mingpt.py | 63 + .../pretrained/weight_conversions/mistral.py | 57 + .../pretrained/weight_conversions/mixtral.py | 73 + .../pretrained/weight_conversions/nanogpt.py | 88 + .../weight_conversions/neel_solu_old.py | 38 + .../pretrained/weight_conversions/neo.py | 49 + .../pretrained/weight_conversions/neox.py | 59 + .../pretrained/weight_conversions/opt.py | 84 + .../pretrained/weight_conversions/phi.py | 64 + .../pretrained/weight_conversions/phi3.py | 56 + .../pretrained/weight_conversions/qwen.py | 65 + .../pretrained/weight_conversions/qwen2.py | 76 + .../pretrained/weight_conversions/t5.py | 101 + .../utilities/activation_functions.py | 26 + transformer_lens/utilities/addmm.py | 35 + transformer_lens/utilities/attention.py | 38 + transformer_lens/utilities/devices.py | 7 +- transformer_lens/utils.py | 27 +- 89 files changed, 10685 insertions(+), 1830 deletions(-) create mode 100644 debugging/comparing-to-huggingface.ipynb create mode 100644 demos/ARENA_Content.ipynb create mode 100644 demos/Patchscopes_Generation_Demo.ipynb create mode 100644 demos/T5.ipynb delete mode 100644 demos/test.ipynb create mode 100644 tests/acceptance/test_hooked_encoder_decoder.py create mode 100644 tests/integration/test_cross_entropy_loss.py create mode 100644 tests/integration/test_match_huggingface.py create mode 100644 tests/unit/components/mlps/test_can_be_used_as_mlp.py create mode 100644 tests/unit/components/mlps/test_gated_mlp.py create mode 100644 tests/unit/components/mlps/test_mlp.py create mode 100644 tests/unit/components/mlps/test_moe.py create mode 100644 tests/unit/components/test_attention.py create mode 100644 tests/unit/factories/test_activation_function_factory.py create mode 100644 tests/unit/factories/test_mlp_factory.py create mode 100644 tests/unit/pretrained_weight_conversions/test_neo.py rename tests/unit/{hooked_transformer_config/test_unwrap.py => test_hooked_transformer_config.py} (56%) create mode 100644 tests/unit/test_loading_from_pretrained_utilities.py create mode 100644 transformer_lens/HookedEncoderDecoder.py delete mode 100644 transformer_lens/components/gated_mlp.py delete mode 100644 transformer_lens/components/mlp.py create mode 100644 transformer_lens/components/mlps/can_be_used_as_mlp.py create mode 100644 transformer_lens/components/mlps/gated_mlp.py create mode 100644 transformer_lens/components/mlps/gated_mlp_4bit.py create mode 100644 transformer_lens/components/mlps/mlp.py create mode 100644 transformer_lens/components/mlps/moe.py delete mode 100644 transformer_lens/components/moe.py create mode 100644 transformer_lens/components/t5_attention.py create mode 100644 transformer_lens/components/t5_block.py create mode 100644 transformer_lens/factories/activation_function_factory.py create mode 100644 transformer_lens/factories/mlp_factory.py create mode 
100644 transformer_lens/pretrained/__init__.py create mode 100644 transformer_lens/pretrained/weight_conversions/__init__.py create mode 100644 transformer_lens/pretrained/weight_conversions/bert.py create mode 100644 transformer_lens/pretrained/weight_conversions/bloom.py create mode 100644 transformer_lens/pretrained/weight_conversions/coder.py create mode 100644 transformer_lens/pretrained/weight_conversions/gemma.py create mode 100644 transformer_lens/pretrained/weight_conversions/gpt2.py create mode 100644 transformer_lens/pretrained/weight_conversions/gptj.py create mode 100644 transformer_lens/pretrained/weight_conversions/llama.py create mode 100644 transformer_lens/pretrained/weight_conversions/mingpt.py create mode 100644 transformer_lens/pretrained/weight_conversions/mistral.py create mode 100644 transformer_lens/pretrained/weight_conversions/mixtral.py create mode 100644 transformer_lens/pretrained/weight_conversions/nanogpt.py create mode 100644 transformer_lens/pretrained/weight_conversions/neel_solu_old.py create mode 100644 transformer_lens/pretrained/weight_conversions/neo.py create mode 100644 transformer_lens/pretrained/weight_conversions/neox.py create mode 100644 transformer_lens/pretrained/weight_conversions/opt.py create mode 100644 transformer_lens/pretrained/weight_conversions/phi.py create mode 100644 transformer_lens/pretrained/weight_conversions/phi3.py create mode 100644 transformer_lens/pretrained/weight_conversions/qwen.py create mode 100644 transformer_lens/pretrained/weight_conversions/qwen2.py create mode 100644 transformer_lens/pretrained/weight_conversions/t5.py create mode 100644 transformer_lens/utilities/activation_functions.py create mode 100644 transformer_lens/utilities/addmm.py create mode 100644 transformer_lens/utilities/attention.py diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 6e92d25..1b71d37 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -44,7 +44,6 @@ jobs: strategy: matrix: python-version: - - "3.8" - "3.9" - "3.10" steps: @@ -123,6 +122,7 @@ jobs: notebook: # - "Activation_Patching_in_TL_Demo" # - "Attribution_Patching_Demo" + - "ARENA_Content" - "BERT" - "Exploratory_Analysis_Demo" # - "Grokking_Demo" @@ -133,6 +133,8 @@ jobs: - "Main_Demo" # - "No_Position_Experiment" - "Othello_GPT" + - "Patchscopes_Generation_Demo" + # - "T5" steps: - uses: actions/checkout@v3 - name: Install Poetry diff --git a/README.md b/README.md index e6242f2..80f350c 100644 --- a/README.md +++ b/README.md @@ -10,20 +10,11 @@ CD](https://github.com/TransformerLensOrg/TransformerLens/actions/workflows/chec [![Docs CD](https://github.com/TransformerLensOrg/TransformerLens/actions/workflows/pages/pages-build-deployment/badge.svg)](https://github.com/TransformerLensOrg/TransformerLens/actions/workflows/pages/pages-build-deployment) -A Library for Mechanistic Interpretability of Generative Language Models. +A Library for Mechanistic Interpretability of Generative Language Models. Maintained by [Bryce Meyer](https://github.com/bryce13950) and created by [Neel Nanda](https://neelnanda.io/about) [![Read the Docs Here](https://img.shields.io/badge/-Read%20the%20Docs%20Here-blue?style=for-the-badge&logo=Read-the-Docs&logoColor=white&link=https://TransformerLensOrg.github.io/TransformerLens/)](https://TransformerLensOrg.github.io/TransformerLens/) -| :exclamation: HookedSAETransformer Removed | -|-----------------------------------------------| - -Hooked SAE has been removed from TransformerLens 2.0. 
The functionality is being moved to -[SAELens](http://github.com/jbloomAus/SAELens). For more information on this release, please see the -accompanying -[announcement](https://transformerlensorg.github.io/TransformerLens/content/news/release-2.0.html) -for details on what's new, and the future of TransformerLens. - This is a library for doing [mechanistic interpretability](https://distill.pub/2020/circuits/zoom-in/) of GPT-2 Style language models. The goal of mechanistic interpretability is to take a trained model and reverse engineer the algorithms @@ -56,7 +47,7 @@ logits, activations = model.run_with_cache("Hello World") ## Key Tutorials * [Introduction to the Library and Mech - Interp](https://arena-ch1-transformers.streamlit.app/[1.2]_Intro_to_Mech_Interp) + Interp](https://arena3-chapter1-transformer-interp.streamlit.app/[1.2]_Intro_to_Mech_Interp) * [Demo of Main TransformerLens Features](https://neelnanda.io/transformer-lens-demo) ## Gallery @@ -111,20 +102,20 @@ you would like to help, please try working on one! The standard answer to "why h yet" is just that there aren't enough people! Key resources: * [A Guide to Getting Started in Mechanistic Interpretability](https://neelnanda.io/getting-started) -* [ARENA Mechanistic Interpretability Tutorials](https://arena-ch1-transformers.streamlit.app/) from +* [ARENA Mechanistic Interpretability Tutorials](https://arena3-chapter1-transformer-interp.streamlit.app/) from Callum McDougall. A comprehensive practical introduction to mech interp, written in TransformerLens - full of snippets to copy and they come with exercises and solutions! Notable tutorials: * [Coding GPT-2 from - scratch](https://arena-ch1-transformers.streamlit.app/[1.1]_Transformer_from_Scratch), with + scratch](https://arena3-chapter1-transformer-interp.streamlit.app/[1.1]_Transformer_from_Scratch), with accompanying video tutorial from me ([1](https://neelnanda.io/transformer-tutorial) [2](https://neelnanda.io/transformer-tutorial-2)) - a good introduction to transformers * [Introduction to Mech Interp and - TransformerLens](https://arena-ch1-transformers.streamlit.app/[1.2]_Intro_to_Mech_Interp): An + TransformerLens](https://arena3-chapter1-transformer-interp.streamlit.app/[1.2]_Intro_to_Mech_Interp): An introduction to TransformerLens and mech interp via studying induction heads. Covers the foundational concepts of the library * [Indirect Object - Identification](https://arena-ch1-transformers.streamlit.app/[1.3]_Indirect_Object_Identification): + Identification](https://arena3-chapter1-transformer-interp.streamlit.app/[1.3]_Indirect_Object_Identification): a replication of interpretability in the wild, that covers standard techniques in mech interp such as [direct logit attribution](https://dynalist.io/d/n2ZWtnoYHrU1s4vnFSAQ519J#z=disz2gTx-jooAcR0a5r8e7LZ), @@ -156,10 +147,18 @@ discussions about eg supporting important new use cases, or if you want to make contributions to the library and want a maintainer's opinion. We'd also love for you to come and share your projects on the Slack! +| :exclamation: HookedSAETransformer Removed | +|-----------------------------------------------| + +Hooked SAE has been removed from TransformerLens in version 2.0. The functionality is being moved to +[SAELens](http://github.com/jbloomAus/SAELens). For more information on this release, please see the +accompanying +[announcement](https://transformerlensorg.github.io/TransformerLens/content/news/release-2.0.html) +for details on what's new, and the future of TransformerLens. 
+ ## Credits -This library was created by **[Neel Nanda](https://neelnanda.io)** and is maintained by **Joseph -Bloom**. +This library was created by **[Neel Nanda](https://neelnanda.io)** and is maintained by **[Bryce Meyer](https://github.com/bryce13950)**. The core features of TransformerLens were heavily inspired by the interface to [Anthropic's excellent Garcon tool](https://transformer-circuits.pub/2021/garcon/index.html). Credit to Nelson diff --git a/debugging/comparing-to-huggingface.ipynb b/debugging/comparing-to-huggingface.ipynb new file mode 100644 index 0000000..b79ae8a --- /dev/null +++ b/debugging/comparing-to-huggingface.ipynb @@ -0,0 +1,991 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "bd0160b8-de87-4a9c-bfe2-b678e89cde89", + "metadata": {}, + "source": [ + "Compare the TransformerLens implementation of a model to the Huggingface implementation. This script was originally use in https://github.com/TransformerLensOrg/TransformerLens/issues/570 to debug Mixtral." + ] + }, + { + "cell_type": "markdown", + "id": "3e1c21b4-5a82-4838-ae2a-0c7be2708b65", + "metadata": {}, + "source": [ + "## setup" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4fb7e0bc-4ef5-40c8-8222-336e83bd6e66", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install transformers matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "cba8adb4-03a4-4061-b62b-18bcc091b8af", + "metadata": {}, + "outputs": [], + "source": [ + "# Everything can be configured here\n", + "model_id = \"\"\n", + "text = \"Hello my name is\"\n", + "device=\"cpu\"\n", + "# Set this to true to trigger hugging face login if needed\n", + "gated_model = False" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "be241e96-3bbb-46a4-a4d4-0213eb094d6e", + "metadata": {}, + "outputs": [], + "source": [ + "# If you need a specific head, uncomment this and specify the head\n", + "# %pip install git+https://github.com/TransformerLensOrg/TransformerLens.git@head\n", + "# Otherwise, for running this on the latest release\n", + "%pip install transformer_lens" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "6d7341d8-881c-41c3-8199-ae9590d51a5a", + "metadata": {}, + "outputs": [], + "source": [ + "if gated_model:\n", + " %pip install huggingface_hub\n", + " from huggingface_hub import login\n", + " login()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "ec6d8055-dcdd-4db9-b13a-28860292ad47", + "metadata": {}, + "outputs": [], + "source": [ + "import einops\n", + "from torch.testing import assert_close\n", + "import torch\n", + "import matplotlib.pyplot as plt\n", + "from transformer_lens import HookedTransformer\n", + "from transformers import AutoModelForCausalLM, AutoTokenizer" + ] + }, + { + "cell_type": "markdown", + "id": "0ceea776-25d6-44b3-99e6-f38c30064954", + "metadata": {}, + "source": [ + "## TransformerLens model" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "2c3cb338-cf1b-4775-b278-302999164e6a", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/conda/lib/python3.10/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. 
If you want to force a new download, use `force_download=True`.\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "1f92d32c0f474ad5a907559e872b7b7f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Loading checkpoint shards: 0%| | 0/19 [00:00 (n h) m\") ==\n", + " hf_model.model.layers[0].self_attn.q_proj.weight\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "83649934-f06b-4f94-8004-59b8d4098589", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(torch.Size([32, 4096, 128]), torch.Size([1024, 4096]))" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tl_model.blocks[0].attn.W_K.shape, hf_model.model.layers[0].self_attn.k_proj.weight.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "4fa20cf5-b720-4946-a7e5-e1d2e6277f6c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor(True)" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.all(\n", + " einops.reduce(\n", + " tl_model.blocks[0].attn.W_K, \"(n repeat) m h -> (n h) m\",\n", + " 'max',\n", + " n=tl_model.cfg.n_key_value_heads,\n", + " repeat=4) ==\n", + " hf_model.model.layers[0].self_attn.k_proj.weight\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "ef6f7ea9-ef0b-4091-8d00-504b481fc59a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor(True)" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.all(\n", + " einops.reduce(\n", + " tl_model.blocks[0].attn.W_V, \"(n repeat) m h -> (n h) m\",\n", + " 'max',\n", + " n=tl_model.cfg.n_key_value_heads,\n", + " repeat=4) ==\n", + " hf_model.model.layers[0].self_attn.v_proj.weight\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "04b8f4be-ce7d-4dc2-acda-d023c721525c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor(True)" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.all(\n", + " einops.rearrange(tl_model.blocks[0].attn.W_O, \"n h m -> m (n h)\") ==\n", + " hf_model.model.layers[0].self_attn.o_proj.weight\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "1e10ed87-31b5-4c1c-b726-7a3f49fbd136", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Parameter containing:\n", + "tensor([[0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " ...,\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.]], requires_grad=True)" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tl_model.blocks[0].attn.b_Q" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "6caf9d98-adb2-45e7-8357-34288b2156f2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor(True)" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.all(hf_model.model.layers[0].block_sparse_moe.gate.weight.T == tl_model.blocks[0].mlp.W_gate)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "00e9ea5d-74c2-4c2a-8e9d-6fc196cb8fc3", + 
"metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(torch.float32, torch.float32)" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "hf_model.model.layers[0].block_sparse_moe.gate.weight.dtype, tl_model.blocks[0].mlp.W_gate.dtype" + ] + }, + { + "cell_type": "markdown", + "id": "df857ae9-8cae-438b-941a-f5050709953e", + "metadata": {}, + "source": [ + "## Compare Layer Outputs" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "06e11c39-786a-4b09-8f5e-18a558259fb1", + "metadata": {}, + "outputs": [], + "source": [ + "test_tensor = torch.randn((1, 1, 4096,))" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "f0186768-d8f0-4d55-a94c-606c4ba3f7ca", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(tensor([[[ 0.3073, 0.6716, -1.5622, ..., 0.1159, 0.7766, -0.2877]]],\n", + " grad_fn=),)" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "hf_model.model.layers[0](test_tensor)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "432bb274-b499-44c9-98d1-777d03425daa", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[ 0.3073, 0.6716, -1.5622, ..., 0.1159, 0.7766, -0.2877]]],\n", + " grad_fn=)" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tl_model.blocks[0](test_tensor)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "4a440811-e7f0-4092-b8e7-f7cac80dc84a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[True, True, True, ..., True, True, True]]])" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "hf_model.model.layers[0](test_tensor)[0] == tl_model.blocks[0](test_tensor)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "8ed65bb3-6990-48e5-9ef2-1becd9dfaffc", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(0.7765660285949707, 0.7765660285949707)" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "hf_model.model.layers[0](test_tensor)[0][0, 0, -2].item(), tl_model.blocks[0](test_tensor)[0, 0, -2].item()" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "763f6c2e-b71f-4724-b2f7-f79a9ab29caf", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor(3153)" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.sum(hf_model.model.layers[0](test_tensor)[0] == tl_model.blocks[0](test_tensor))" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "f41aa0eb-6386-476d-ae1a-b5e7e06893aa", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAkQAAAHFCAYAAAAT5Oa6AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAABJxElEQVR4nO3df3zP9eL///ur7bWfZjazX5lZQsoqqZh+IMzI7xM6OouSdEg03nXUu0yJkwrnzEmOGFFR50S/tEyhNPKjo+J4S/Ij2Ux+zO/tZR7fP3z3/HjZD8zsNXverpfLLhfPx/Pxejwfj8frOe4ez+fz9XIYY4wAAABs7CpPdwAAAMDTCEQAAMD2CEQAAMD2CEQAAMD2CEQAAMD2CEQAAMD2CEQAAMD2CEQAAMD2CEQAAMD2CES4Ys2ePVsOh8P68fPzU2RkpNq2basJEyYoNze32GtSU1PlcDjcygoKCvTYY48pKipKXl5euvnmmyVJBw4c0P3336/w8HA5HA716NGjEkZ15dqxY4fb++FwOFSzZk3ddNNNmjJligoLC8vV7uLFi5Wamlqxna1iHA6HHn/8cU93o0Jt3rxZAwYMUL169eTj46OwsDB17txZn3322SW1+/rrr2v27NkV08nzOH78uFJTU7V8+fJKOR48y9vTHQAuVXp6uq677jq5XC7l5uZq5cqVevnll/Xqq69qwYIFat++vVX3kUceUVJSktvrp02bpunTpystLU3NmzdXjRo1JEkvvviiFi5cqFmzZqlBgwYKDQ2t1HFdqYYNG6Z+/fpJkg4dOqSPPvpITz75pH799Ve99tprF93e4sWL9Y9//KPah6Lq5IMPPlC/fv10zTXX6LnnnlPjxo21d+9epaenq3Pnzvqf//kfTZw4sVxtv/766woLC9OAAQMqttMlOH78uMaOHStJatOmzWU/HjyLQIQrXtOmTXXrrbda23/4wx/05JNP6s4771SvXr20detWRURESJLq1q2runXrur1+48aN8vf3L/Y/9I0bN6pBgwZ64IEHKqyvJ06ckL+/f4W1VxXVq1dPLVu2tLaTkpK0ceNGvfvuu+UKRKh6jh8/roCAgBL3bdu2TcnJyYqPj9fy5csVGBho7evdu7f+/Oc/65VXXtEtt9yi+++/v7K6DJwXl8xQLdWrV0+vvfaajhw5ounTp1vl514yczgcevPNN3XixAnrMk/RpbilS5dq8+bNVnnRsnlBQYHGjRun6667Tr6+vqpTp44eeugh7du3z60P9evXV5cuXfTBBx+oWbNm8vPzs/63mZOTo8GDB6tu3bry8fFRXFycxo4dq1OnTlmvL7oE9eqrr2rSpEmKi4tTjRo1lJCQoNWrVxcb87fffquuXbuqdu3a8vPzU4MGDTRixAi3Olu3blW/fv0UHh4uX19fNWnSRP/4xz/c6pw+fVrjxo1T48aN5e/vr1q1aunGG2/U3/72t3K9F5IUHBwsp9NZrHzBggVKSEhQYGCgatSooY4dO+o///mPtX/AgAFW/86+FLdjxw717t1bN9xwg1t7Xbt2lcPh0Pvvv2+Vfffdd3I4HPr444+tsguZf+ni3+uMjAzdcsst8vf313XXXadZs2aVe85KmqvExERFRUXJ399fTZo00V/+8hcdO3bMqjN37lw5HA6tWrWq2OtfeOEFOZ1O7dmzxypbunSp2rVrp5o1ayogIEB33HGHvvjiC7fXFf3OfPfdd7rvvvsUEhKiBg0alNrPyZMn6/jx40pLS3MLQ0Vee+011apVSy+99FKxY5yr6Hdxx44dks7M86ZNm7RixQrrXKhfv74kafny5XI4HJo3b55SUlIUGRkpf39/tW7d2u2cks6s9pS04jNgwACrvR07dqhOnTqSpLFjx1rHK1qZ2rdvnx599FHFxMRY58Ydd9yhpUuXljo3qOIMcIVKT083kszatWtL3H/06FHj5eVl2rVrZ5WNGTPGnH3ar1q1ynTu3Nn4+/ubVatWmVWrVpmcnByzatUq06xZM3PNNddY5Xl5eaawsNAkJSWZwMBAM3bsWJOZmWnefPNNc/XVV5vrr7/eHD9+3Go7NjbWREVFmWuuucbMmjXLLFu2zKxZs8ZkZ2ebmJgYExsba6ZPn26WLl1qXnzxRePr62sGDBhgvX779u1Gkqlfv75JSkoyixYtMosWLTLx8fEmJCTEHDp0yKqbkZFhnE6nufHGG83s2bPNl19+aWbNmmXuv/9+q86mTZtMcHCwiY+PN2+99ZZZsmSJGTlypLnqqqtMamqqVW/ChAnGy8vLjBkzxnzxxRcmIyPDTJkyxa1OSYr6+/LLLxuXy2VcLpf5/fffzcyZM423t7d59tln3eq/9NJLxuFwmIcffth88skn5oMPPjAJCQkmMDDQbNq0yRhjzM8//2zuu+8+I8l6H1atWmVOnjxp3njjDSPJ7NmzxxhjjMvlMkFBQcbf398MGjTIOs7LL79svL29zeHDh40x5oLn/2Lf67p165rrr7/evPXWW+bzzz83vXv3NpLMihUrypw3Y4yRZIYOHVpmnRdffNFMnjzZfPrpp2b58uXmjTfeMHFxcaZt27ZWnfz8fBMZGWkeeOABt9e6XC4THR1tevfubZXNnTvXOBwO06NHD/PBBx+Yjz/+2HTp0sV4eXmZpUuXWvWKfmdiY2PN008/bTIzM82iRYtK7WejRo1MREREmWPp06ePkWSys7PdjnGuot/x7du3G2OM+e6778w111xjmjVrZp0L3333nTHGmGXLlhlJJiYmxnTv3t18/PHHZt68eebaa681NWvWNNu2bbPabd26tWndunWx4/Xv39/ExsYaY4w5efKkycjIMJLMwIEDreP9/PPPxhhjOnbsaOrUqWP++c9/muXLl5tFixaZ559/3syfP7/MsaPqIhDhinW+QGSMMREREaZJkybWdkl/8fbv398EBgYWe23r1q3NDTfc4Fb27rvvGknm3//+t1v52rVrjSTz+uuvW2WxsbHGy8vLbNmyxa3u4MGDTY0aNczOnTvdyl999VUjyQoDRQEjPj7enDp1yqq3Zs0aI8m8++67VlmDBg1MgwYNzIkTJ0qdi44dO5q6deuavLw8t/LHH3/c+Pn5mQMHDhhjjOnSpYu5+eabS22nNEX9LelnwIABbmPYtWuX8fb2NsOGDXNr48iRIyYyMtL06dPHKhs6dGiJ/1j+/PPPRpJ56623jDHGrFy50kgyTz31lImLi7PqdejQwbRq1cravtD5v9j32s/Pz63NEydOmNDQUDN48ODzzt2FBKKznT592rhcLrNixQojyXz//ffWvjFjxhgfHx+zd+9eq2zBggVu4ezYsWMmNDTUdO3a1a3dwsJCc9NNN5nbb7/drT1J5vnnn7+gvvn5+ZmWLVuWWefpp582ksy3337rdoxznRuIjDHmhhtuKDHMFAWiW265xZw+fdoq37Fjh3E6neaRRx6xyi4kEBljzL59+4wkM2bMmGJ1a9SoYUaMGFHmOHFl4ZIZqjVjTIW298knn6hWrVrq2rWrTp06Zf3cfPPNioyMLP
Y0yo033qhGjRoVa6Nt27aKjo52a6NTp06SpBUrVrjVv/fee+Xl5eXWpiTt3LlTkvTTTz9p27ZtGjhwoPz8/Ers98mTJ/XFF1+oZ8+eCggIcDtu586ddfLkSesy3O23367vv/9eQ4YM0eeff67Dhw9f1BwNHz5ca9eu1dq1a7Vs2TKNHz9e7733nv74xz9adT7//HOdOnVKDz74oFtf/Pz81Lp16wt6qqdBgwaqX7++dYkiMzNT8fHx+tOf/qTt27dr27Ztys/P18qVK91urL/Q+b/Y9/rmm29WvXr1rG0/Pz81atTIep8u1S+//KJ+/fopMjJSXl5ecjqdat26taQzT3QV+fOf/yxJmjFjhlU2depUxcfH6+6775YkZWVl6cCBA+rfv7/b2E6fPq2kpCStXbvW7VKcdObevIpS9HtZ0mWyS9WvXz+3dmNjY9WqVSstW7asQo9z++23a/bs2Ro3bpxWr14tl8tVoe2j8nFTNaqtY8eOaf/+/YqPj6+wNvfu3atDhw7Jx8enxP2///6723ZUVFSJbXz88ccl3lNTUhu1a9d22/b19ZV05gZtSdb9LOfeLH62/fv369SpU0pLS1NaWlqZxx09erQCAwM1b948vfHGG/Ly8tLdd9+tl19+2e3m9dLUrVvXrV6bNm3kcDg0evRoff755+rYsaP27t0rSbrttttKbOOqqy7s/2rt2rVTRkaGpDP3w3To0EHx8fGKiIjQ0qVL1bBhQ504ccItEF3o/F/se33u+ySdea+K3qdLcfToUd11113y8/PTuHHj1KhRIwUEBOjXX39Vr1693I4RERGhvn37avr06frLX/6iTZs26euvv3a7l65o/u+7775Sj3ngwAG3e4BKOpdLUq9ePW3fvr3MOkX3BMXExFxQmxcjMjKyxLLvv/++Qo+zYMECjRs3Tm+++aaee+451ahRQz179tTEiRNL7AOqPgIRqq1PP/1UhYWFFfq4bFhYmGrXrm39I3yuoKAgt+2S/gccFhamG2+80e2m0rNFR0dfVJ+KbvzcvXt3qXVCQkLk5eWl5ORkDR06tMQ6cXFxkiRvb2+lpKQoJSVFhw4d0tKlS/XMM8+oY8eO+vXXX0t9uqgsRata33//vTp27KiwsDBJ0r/+9S/FxsZedHtF2rVrp5kzZ2rNmjX69ttv9b//+7+SpHvuuUeZmZnauXOnatSo4fbU24XO/8W+15fTl19+qT179mj58uXWqpB05mMNSjJ8+HDNnTtXH374oTIyMlSrVi23pyWL5j8tLc1tbs5W9GRmkQtdzenQoYP+8Y9/aPXq1SW2ffz4cWVmZqpp06ZWcCha2czPz7cCv1Q8dF6InJycEsvODqx+fn7Ky8srVu9ijhcWFqYpU6ZoypQp2rVrlz766CP95S9/UW5ubqnnDKo2AhGqpV27dmnUqFEKDg7W4MGDK6zdLl26aP78+SosLFSLFi3K3cbixYvVoEEDhYSEXHKfGjVqpAYNGmjWrFlKSUlx+welSEBAgNq2bav//Oc/uvHGG0td9ThXrVq1dN999+m3337TiBEjtGPHDl1//fUX3ccNGzZIksLDwyVJHTt2lLe3t7Zt23beSzFnr4id+5EF7dq1k8Ph0HPPPaerrrrKuiTUvn17/c///I927typu+++22016ELnvyLe64pSFEbOfW/PXvU5W/PmzdWqVSu9/PLL2rhxox599FG31Z477rhDtWrV0n//+98K/0DIJ598UrNmzdKwYcOKPXYvSaNGjdLBgwc1bdo0q6zoya4ffvjBbdXw7CcDi5xv1e3dd99VSkqKNWc7d+5UVlaWHnzwQbfjvf/++24BbP/+/crKylLNmjXdjiXpvKt89erV0+OPP64vvvhC33zzTZl1UXURiHDF27hxo3UPRG5urr7++mulp6fLy8tLCxcutFZQKsL999+vt99+W507d9bw4cN1++23y+l0avfu3Vq2bJm6d++unj17ltnGCy+8oMzMTLVq1UpPPPGEGjdurJMnT2rHjh1avHix3njjjTIvf5XkH//4h7p27aqWLVvqySefVL169bRr1y59/vnnevvttyVJf/vb33TnnXfqrrvu0p///GfVr19fR44c0c8//6yPP/5YX375paQzj64XfbZTnTp1tHPnTk2ZMkWxsbFq2LDhefuya9cu636kY8eOadWqVZowYYJiY2PVq1cvSWf+QXrhhRf07LPP6pdfflFSUpJCQkK0d+9erVmzRoGBgdZHFBRd8nz55ZfVqVMneXl5WaEuPDxcTZs21ZIlS9S2bVtr9ap9+/Y6cOCADhw4oEmTJpVr/ivivb4Y27Zt07/+9a9i5ddff71atWqlkJAQPfbYYxozZoycTqfefvvtMi8DDR8+XH379pXD4dCQIUPc9tWoUUNpaWnq37+/Dhw4oPvuu0/h4eHat2+fvv/+e+3bt88tsFyMBg0aaO7cuXrggQd02223KSUlxfpgxlmzZumzzz7TqFGj1LdvX+s1nTt3VmhoqAYOHKgXXnhB3t7emj17tn799ddi7cfHx2v+/PlasGCBrrnmGvn5+bldFs/NzVXPnj01aNAg5eXlacyYMfLz89Po0aOtOsnJyZo+fbr+9Kc/adCgQdq/f78mTpzoFoakM6uAsbGx+vDDD9WuXTuFhoYqLCxMISEhatu2rfr166frrrtOQUFBWrt2rTIyMqxzHFcgT9/VDZRX0RMoRT8+Pj4mPDzctG7d2owfP97k5uYWe82lPmVmzJlHmF999VVz0003GT8/P1OjRg1z3XXXmcGDB5utW7da9WJjY829995bYt/37dtnnnjiCRMXF2ecTqcJDQ01zZs3N88++6w5evSoMeb/PbX1yiuvFHu9SnjyZdWqVaZTp04mODjY+Pr6mgYNGpgnn3zSrc727dvNww8/bK6++mrjdDpNnTp1TKtWrcy4ceOsOq+99ppp1aqVCQsLMz4+PqZevXpm4MCBZseOHSWO5ey2dc7TZX5+fqZRo0ZmxIgR1iPWZ1u0aJFp27atqVmzpvH19TWxsbHmvvvuc3vsOz8/3zzyyCOmTp06xuFwFHvq6MknnzSSzEsvveTWdsOGDY0k88MPPxQ77oXMvzGX/l6X9jTTuc6dt7N/it7nrKwsk5CQYAICAkydOnXMI488Yr777jsjyaSnpxdrMz8/3/j6+pqkpKRSj7tixQpz7733mtDQUON0Os3VV19t7r33XvP+++9bdYp+Z/bt23fecZxt06ZNpn///qZu3brWHCclJZlPP/20xPpr1qwxrVq1MoGBgebqq682Y8aMMW+++Wax93vHjh0mMTHRBAUFWR8HYMz/e8ps7ty55oknnjB16tQxvr6+5q677jLr1q0rdrw5c+aYJk2aGD8/P3P99debBQsWFHvKzBhjli5dapo1a2Z8fX2NJNO/f39z8uRJ89hjj5kbb7zR1KxZ0/j7+5vGjRubMWPGmGPHjl3UPKHqcBhTwY/hAAA87uOPP1a3bt306aefqnPnzp7uzmW3fPlytW3bVu+//36ZN4sDpeGSGQBUI//973+1c
+dOjRw5UjfffLP1cQIAysbnEAFANTJkyBB169ZNISEhevfddy/LZ/0A1RGXzAAAgO2xQgQAAGyPQAQAAGyPQAQAAGyPp8wu0OnTp7Vnzx4FBQVxkyIAAFcIY4yOHDmi6OjoMr8nkUB0gfbs2XNZvogQAABcfr/++muZ3wJAILpARV/k+Ouvvxb7ePfqxuVyacmSJUpMTCz1G8FR8Zh3z2HuPYN59xw7zf3hw4cVExNz3i9kJhBdoKLLZDVr1rRFIAoICFDNmjWr/S9KVcK8ew5z7xnMu+fYce7Pd7sLN1UDAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADbIxABAADb8/Z0BwCgqujSRSooKH3/smWV1xcAlYsVIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHsEIgAAYHseDUTTpk3TjTfeqJo1a6pmzZpKSEjQZ599Zu03xig1NVXR0dHy9/dXmzZttGnTJrc28vPzNWzYMIWFhSkwMFDdunXT7t273eocPHhQycnJCg4OVnBwsJKTk3Xo0KHKGCIAALgCeDQQ1a1bV3/961+1bt06rVu3Tvfcc4+6d+9uhZ6JEydq0qRJmjp1qtauXavIyEh16NBBR44csdoYMWKEFi5cqPnz52vlypU6evSounTposLCQqtOv379tGHDBmVkZCgjI0MbNmxQcnJypY8XAABUTd6ePHjXrl3dtl966SVNmzZNq1ev1vXXX68pU6bo2WefVa9evSRJc+bMUUREhN555x0NHjxYeXl5mjlzpubOnav27dtLkubNm6eYmBgtXbpUHTt21ObNm5WRkaHVq1erRYsWkqQZM2YoISFBW7ZsUePGjSt30AAAoMrxaCA6W2Fhod5//30dO3ZMCQkJ2r59u3JycpSYmGjV8fX1VevWrZWVlaXBgwdr/fr1crlcbnWio6PVtGlTZWVlqWPHjlq1apWCg4OtMCRJLVu2VHBwsLKyskoNRPn5+crPz7e2Dx8+LElyuVxyuVwVPfwqpWh81X2cVQ3z7jlFc+50lj33vDUVi3Pec+w09xc6Ro8Hoh9//FEJCQk6efKkatSooYULF+r6669XVlaWJCkiIsKtfkREhHbu3ClJysnJkY+Pj0JCQorVycnJseqEh4cXO254eLhVpyQTJkzQ2LFji5UvWbJEAQEBFzfIK1RmZqanu2BLzLvnDBpU9twvXlxJHbEZznnPscPcHz9+/ILqeTwQNW7cWBs2bNChQ4f073//W/3799eKFSus/Q6Hw62+MaZY2bnOrVNS/fO1M3r0aKWkpFjbhw8fVkxMjBITE1WzZs3zjutK5nK5lJmZqQ4dOsjpdHq6O7bBvHtO0dzPmNFBLlfpc//JJ5XYKRvgnPccO8190RWe8/F4IPLx8dG1114rSbr11lu1du1a/e1vf9PTTz8t6cwKT1RUlFU/NzfXWjWKjIxUQUGBDh486LZKlJubq1atWll19u7dW+y4+/btK7b6dDZfX1/5+voWK3c6ndX+5Clip7FWJcy757hcThUUlD73vC2XB+e859hh7i90fFXuc4iMMcrPz1dcXJwiIyPdlvMKCgq0YsUKK+w0b95cTqfTrU52drY2btxo1UlISFBeXp7WrFlj1fn222+Vl5dn1QEAAPbm0RWiZ555Rp06dVJMTIyOHDmi+fPna/ny5crIyJDD4dCIESM0fvx4NWzYUA0bNtT48eMVEBCgfv36SZKCg4M1cOBAjRw5UrVr11ZoaKhGjRql+Ph466mzJk2aKCkpSYMGDdL06dMlSY8++qi6dOnCE2YAAECShwPR3r17lZycrOzsbAUHB+vGG29URkaGOnToIEl66qmndOLECQ0ZMkQHDx5UixYttGTJEgUFBVltTJ48Wd7e3urTp49OnDihdu3aafbs2fLy8rLqvP3223riiSesp9G6deumqVOnVu5gAQBAleXRQDRz5swy9zscDqWmpio1NbXUOn5+fkpLS1NaWlqpdUJDQzVv3rzydhMAAFRzVe4eIgAAgMpGIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALZHIAIAALbn0UA0YcIE3XbbbQoKClJ4eLh69OihLVu2uNUZMGCAHA6H20/Lli3d6uTn52vYsGEKCwtTYGCgunXrpt27d7vVOXjwoJKTkxUcHKzg4GAlJyfr0KFDl3uIAADgCuDRQLRixQoNHTpUq1evVmZmpk6dOqXExEQdO3bMrV5SUpKys7Otn8WLF7vtHzFihBYuXKj58+dr5cqVOnr0qLp06aLCwkKrTr9+/bRhwwZlZGQoIyNDGzZsUHJycqWMEwAAVG3enjx4RkaG23Z6errCw8O1fv163X333Va5r6+vIiMjS2wjLy9PM2fO1Ny5c9W+fXtJ0rx58xQTE6OlS5eqY8eO2rx5szIyMrR69Wq1aNFCkjRjxgwlJCRoy5Ytaty48WUaIQAAuBJ4NBCdKy8vT5IUGhrqVr58+XKFh4erVq1aat26tV566SWFh4dLktavXy+Xy6XExESrfnR0tJo2baqsrCx17NhRq1atUnBwsBWGJKlly5YKDg5WVlZWiYEoPz9f+fn51vbhw4clSS6XSy6Xq+IGXQUVja+6j7OqYd49p2jOnc6y5563pmJxznuOneb+QsdYZQKRMUYpKSm688471bRpU6u8U6dO6t27t2JjY7V9+3Y999xzuueee7R+/Xr5+voqJydHPj4+CgkJcWsvIiJCOTk5kqScnBwrQJ0tPDzcqnOuCRMmaOzYscXKlyxZooCAgEsZ6hUjMzPT012wJebdcwYNKnvuz7lajwrCOe85dpj748ePX1C9KhOIHn/8cf3www9auXKlW3nfvn2tPzdt2lS33nqrYmNj9emnn6pXr16ltmeMkcPhsLbP/nNpdc42evRopaSkWNuHDx9WTEyMEhMTVbNmzQse15XI5XIpMzNTHTp0kNPp9HR3bIN595yiuZ8xo4NcrtLn/pNPKrFTNsA57zl2mvuiKzznUyUC0bBhw/TRRx/pq6++Ut26dcusGxUVpdjYWG3d
ulWSFBkZqYKCAh08eNBtlSg3N1etWrWy6uzdu7dYW/v27VNERESJx/H19ZWvr2+xcqfTWe1PniJ2GmtVwrx7jsvlVEFB6XPP23J5cM57jh3m/kLH59GnzIwxevzxx/XBBx/oyy+/VFxc3Hlfs3//fv3666+KioqSJDVv3lxOp9Nt2S87O1sbN260AlFCQoLy8vK0Zs0aq863336rvLw8qw4AALAvj64QDR06VO+8844+/PBDBQUFWffzBAcHy9/fX0ePHlVqaqr+8Ic/KCoqSjt27NAzzzyjsLAw9ezZ06o7cOBAjRw5UrVr11ZoaKhGjRql+Ph466mzJk2aKCkpSYMGDdL06dMlSY8++qi6dOnCE2YAAMCzgWjatGmSpDZt2riVp6ena8CAAfLy8tKPP/6ot956S4cOHVJUVJTatm2rBQsWKCgoyKo/efJkeXt7q0+fPjpx4oTatWun2bNny8vLy6rz9ttv64knnrCeRuvWrZumTp16+QcJAACqPI8GImNMmfv9/f31+eefn7cdPz8/paWlKS0trdQ6oaGhmjdv3kX3EQAAVH98lxkAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9AhEAALA9jwaiCRMm6LbbblNQUJDCw8PVo0cPbdmyxa2OMUapqamKjo6Wv7+/2rRpo02bNrnVyc/P17BhwxQWFqbAwEB169ZNu3fvdqtz8OBBJScnKzg4WMHBwUpOTtahQ4cu9xABAMAVwKOBaMWKFRo6dKhWr16tzMxMnTp1SomJiTp27JhVZ+LEiZo0aZKmTp2qtWvXKjIyUh06dNCRI0esOiNGjNDChQs1f/58rVy5UkePHlWXLl1UWFho1enXr582bNigjIwMZWRkaMOGDUpOTq7U8QIAgKrJuzwv2r59u+Li4i754BkZGW7b6enpCg8P1/r163X33XfLGKMpU6bo2WefVa9evSRJc+bMUUREhN555x0NHjxYeXl5mjlzpubOnav27dtLkubNm6eYmBgtXbpUHTt21ObNm5WRkaHVq1erRYsWkqQZM2YoISFBW7ZsUePGjS95LAAA4MpVrhWia6+9Vm3bttW8efN08uTJCutMXl6eJCk0NFTSmeCVk5OjxMREq46vr69at26trKwsSdL69evlcrnc6kRHR6tp06ZWnVWrVik4ONgKQ5LUsmVLBQcHW3UAAIB9lWuF6Pvvv9esWbM0cuRIPf744+rbt68GDhyo22+/vdwdMcYoJSVFd955p5o2bSpJysnJkSRFRES41Y2IiNDOnTutOj4+PgoJCSlWp+j1OTk5Cg8PL3bM8PBwq8658vPzlZ+fb20fPnxYkuRyueRyucozxCtG0fiq+zirGubdc4rm3Okse+55ayoW57zn2GnuL3SM5QpETZs21aRJkzRx4kR9/PHHmj17tu688041bNhQAwcOVHJysurUqXNRbT7++OP64YcftHLlymL7HA6H27YxpljZuc6tU1L9stqZMGGCxo4dW6x8yZIlCggIKPPY1UVmZqanu2BLzLvnDBpU9twvXlxJHbEZznnPscPcHz9+/ILqlSsQWS/29lbPnj3VuXNnvf766xo9erRGjRql0aNHq2/fvnr55ZcVFRV13naGDRumjz76SF999ZXq1q1rlUdGRko6s8Jzdju5ubnWqlFkZKQKCgp08OBBt1Wi3NxctWrVyqqzd+/eYsfdt29fsdWnIqNHj1ZKSoq1ffjwYcXExCgxMVE1a9Y875iuZC6XS5mZmerQoYOcTqenu2MbzLvnFM39jBkd5HKVPveffFKJnbIBznnPsdPcF13hOZ9LCkTr1q3TrFmzNH/+fAUGBmrUqFEaOHCg9uzZo+eff17du3fXmjVrSn29MUbDhg3TwoULtXz58mI3asfFxSkyMlKZmZlq1qyZJKmgoEArVqzQyy+/LElq3ry5nE6nMjMz1adPH0lSdna2Nm7cqIkTJ0qSEhISlJeXpzVr1liX9b799lvl5eVZoelcvr6+8vX1LVbudDqr/clTxE5jrUqYd89xuZwqKCh97nlbLg/Oec+xw9xf6PjKFYgmTZqk9PR0bdmyRZ07d9Zbb72lzp0766qrztyjHRcXp+nTp+u6664rs52hQ4fqnXfe0YcffqigoCDrfp7g4GD5+/vL4XBoxIgRGj9+vBo2bKiGDRtq/PjxCggIUL9+/ay6AwcO1MiRI1W7dm2FhoZq1KhRio+Pt546a9KkiZKSkjRo0CBNnz5dkvToo4+qS5cuPGEGAADKF4imTZumhx9+WA899JB1Wetc9erV08yZM8/bjiS1adPGrTw9PV0DBgyQJD311FM6ceKEhgwZooMHD6pFixZasmSJgoKCrPqTJ0+Wt7e3+vTpoxMnTqhdu3aaPXu2vLy8rDpvv/22nnjiCetptG7dumnq1KkXO3QAAFANlSsQbd269bx1fHx81L9//zLrGGPO247D4VBqaqpSU1NLrePn56e0tDSlpaWVWic0NFTz5s077/EAAID9lOtziNLT0/X+++8XK3///fc1Z86cS+4UAABAZSpXIPrrX/+qsLCwYuXh4eEaP378JXcKAACgMpUrEO3cubPEr+6IjY3Vrl27LrlTAAAAlalcgSg8PFw//PBDsfLvv/9etWvXvuROAQAAVKZyBaL7779fTzzxhJYtW6bCwkIVFhbqyy+/1PDhw3X//fdXdB8BAAAuq3I9ZTZu3Djt3LlT7dq1k7f3mSZOnz6tBx98kHuIAADAFadcgcjHx0cLFizQiy++qO+//17+/v6Kj49XbGxsRfcPAADgsrukr+5o1KiRGjVqVFF9AQAA8IhyBaLCwkLNnj1bX3zxhXJzc3X69Gm3/V9++WWFdA4AAKAylCsQDR8+XLNnz9a9996rpk2byuFwVHS/AAAAKk25AtH8+fP13nvvqXPnzhXdHwAAgEpXrsfufXx8dO2111Z0XwAAADyiXIFo5MiR+tvf/nZBX84KAABQ1ZXrktnKlSu1bNkyffbZZ7rhhhvkdDrd9n/wwQcV0jkAAIDKUK5AVKtWLfXs2bOi+wIAAOAR5QpE6enpFd0PAAAAjynXPUSSdOrUKS1dulTTp0/XkSNHJEl79uzR0aNHK6xzAAAAlaFcK0Q7d+5UUlKSdu3apfz8fHXo0EFBQUGaOHGiTp48qTfeeKOi+wkAAHDZlGuFaPjw4br11lt18OBB+fv7W+U9e/bUF198UWGdAwAAqAzlfsrsm2++kY+Pj1t5bGysfvvttwrpGAAAQGU
p1wrR6dOnVVhYWKx89+7dCgoKuuROAQAAVKZyBaIOHTpoypQp1rbD4dDRo0c1ZswYvs4DAABcccp1yWzy5Mlq27atrr/+ep08eVL9+vXT1q1bFRYWpnfffbei+wgAAHBZlSsQRUdHa8OGDXr33Xf13Xff6fTp0xo4cKAeeOABt5usAQAArgTlCkSS5O/vr4cfflgPP/xwRfYHAACg0pUrEL311ltl7n/wwQfL1RkAAABPKFcgGj58uNu2y+XS8ePH5ePjo4CAAAIRAAC4opTrKbODBw+6/Rw9elRbtmzRnXfeyU3VAADgilPu7zI7V8OGDfXXv/612OoRAABAVVdhgUiSvLy8tGfPnopsEgAA4LIr1z1EH330kdu2MUbZ2dmaOnWq7rjjjgrpGAAAQGUpVyDq0aOH27bD4VCdOnV0zz336LXXXquIfgEAAFSacgWi06dPV3Q/AAAAPKZC7yECAAC4EpVrhSglJeWC606aNKk8hwAAAKg05QpE//nPf/Tdd9/p1KlTaty4sSTpp59+kpeXl2655RarnsPhqJheAgAAXEblCkRdu3ZVUFCQ5syZo5CQEElnPqzxoYce0l133aWRI0dWaCcBAAAup3LdQ/Taa69pwoQJVhiSpJCQEI0bN46nzAAAwBWnXIHo8OHD2rt3b7Hy3NxcHTly5JI7BQAAUJnKFYh69uyphx56SP/617+0e/du7d69W//61780cOBA9erVq6L7CAAAcFmV6x6iN954Q6NGjdKf/vQnuVyuMw15e2vgwIF65ZVXKrSDAAAAl1u5AlFAQIBef/11vfLKK9q2bZuMMbr22msVGBhY0f0DAAC47C7pgxmzs7OVnZ2tRo0aKTAwUMaYiuoXAABApSlXINq/f7/atWunRo0aqXPnzsrOzpYkPfLIIxf1yP1XX32lrl27Kjo6Wg6HQ4sWLXLbP2DAADkcDrefli1butXJz8/XsGHDFBYWpsDAQHXr1k27d+92q3Pw4EElJycrODhYwcHBSk5O1qFDh8ozdAAAUA2VKxA9+eSTcjqd2rVrlwICAqzyvn37KiMj44LbOXbsmG666SZNnTq11DpJSUnWSlR2drYWL17stn/EiBFauHCh5s+fr5UrV+ro0aPq0qWLCgsLrTr9+vXThg0blJGRoYyMDG3YsEHJyckXMWIAAFCdleseoiVLlujzzz9X3bp13cobNmyonTt3XnA7nTp1UqdOncqs4+vrq8jIyBL35eXlaebMmZo7d67at28vSZo3b55iYmK0dOlSdezYUZs3b1ZGRoZWr16tFi1aSJJmzJihhIQEbdmyxfqkbQAAYF/lCkTHjh1zWxkq8vvvv8vX1/eSO3W25cuXKzw8XLVq1VLr1q310ksvKTw8XJK0fv16uVwuJSYmWvWjo6PVtGlTZWVlqWPHjlq1apWCg4OtMCRJLVu2VHBwsLKyskoNRPn5+crPz7e2Dx8+LElyuVzWk3XVVdH4qvs4qxrm3XOK5tzpLHvueWsqFue859hp7i90jOUKRHfffbfeeustvfjii5LOfGfZ6dOn9corr6ht27blabJEnTp1Uu/evRUbG6vt27frueee0z333KP169fL19dXOTk58vHxcfvEbEmKiIhQTk6OJCknJ8cKUGcLDw+36pRkwoQJGjt2bLHyJUuWlBgGq6PMzExPd8GWmHfPGTSo7Lk/54o9KgjnvOfYYe6PHz9+QfXKFYheeeUVtWnTRuvWrVNBQYGeeuopbdq0SQcOHNA333xTniZL1LdvX+vPTZs21a233qrY2Fh9+umnZX4ApDHG7YtlS/qS2XPrnGv06NFKSUmxtg8fPqyYmBglJiaqZs2aFzuUK4rL5VJmZqY6dOggp9Pp6e7YBvPuOUVzP2NGB7lcpc/9J59UYqdsgHPec+w090VXeM6nXIHo+uuv1w8//KBp06bJy8tLx44dU69evTR06FBFRUWVp8kLEhUVpdjYWG3dulWSFBkZqYKCAh08eNBtlSg3N1etWrWy6pT0NSP79u1TREREqcfy9fUt8fKf0+ms9idPETuNtSph3j3H5XKqoKD0uedtuTw45z3HDnN/oeO76EBUdM/O9OnTS7ykdDnt379fv/76qxW6mjdvLqfTqczMTPXp00fSmc9G2rhxoyZOnChJSkhIUF5entasWaPbb79dkvTtt98qLy/PCk0AAMDeLjoQOZ1Obdy4sczLTRfq6NGj+vnnn63t7du3a8OGDQoNDVVoaKhSU1P1hz/8QVFRUdqxY4eeeeYZhYWFqWfPnpKk4OBgDRw4UCNHjlTt2rUVGhqqUaNGKT4+3nrqrEmTJkpKStKgQYM0ffp0SdKjjz6qLl268IQZAACQVM7PIXrwwQc1c+bMSz74unXr1KxZMzVr1kySlJKSombNmun555+Xl5eXfvzxR3Xv3l2NGjVS//791ahRI61atUpBQUFWG5MnT1aPHj3Up08f3XHHHQoICNDHH38sLy8vq87bb7+t+Ph4JSYmKjExUTfeeKPmzp17yf0HAADVQ7nuISooKNCbb76pzMxM3XrrrcW+w2zSpEkX1E6bNm3K/LqPzz///Lxt+Pn5KS0tTWlpaaXWCQ0N1bx58y6oTwAAwH4uKhD98ssvql+/vjZu3KhbbrlFkvTTTz+51amIS2kAAACV6aICUcOGDZWdna1ly5ZJOvNY/N///vcyn9YCAACo6i7qHqJzL2999tlnOnbsWIV2CAAAoLKV66bqImXd/wMAAHCluKhA5HA4it0jxD1DAADgSndR9xAZYzRgwADrE5xPnjypxx57rNhTZh988EHF9RAAAOAyu6hA1L9/f7ftP/3pTxXaGQAAAE+4qECUnp5+ufoBAADgMZd0UzUAAEB1QCACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC2RyACAAC259FA9NVXX6lr166Kjo6Ww+HQokWL3PYbY5Samqro6Gj5+/urTZs22rRpk1ud/Px8DRs2TGFhYQoMDFS3bt20e/dutzoHDx5UcnKygoODFRwcrOTkZB06dOgyjw4AAFwpPBqIjh07pptuuklTp04tcf/EiRM1adIkTZ06VWvXrlVkZKQ6dOigI0eOWHVGjBihhQsXav78+Vq5cqWOHj2qLl26qLCw0KrTr18/bdiwQRkZGcrIyNCGDRuUnJx82ccHAACuDN6ePHinTp3UqVOnEvcZYzRlyhQ9++yz6tWrlyRpzpw5ioiI0DvvvKPBgwcrLy9PM2fO1Ny5c9W+fXtJ0rx58xQTE6OlS5eqY8eO2rx5szIyMrR69Wq1aNFCkjRjxgwlJCRoy5Ytaty4ceUMFg
AAVFkeDURl2b59u3JycpSYmGiV+fr6qnXr1srKytLgwYO1fv16uVwutzrR0dFq2rSpsrKy1LFjR61atUrBwcFWGJKkli1bKjg4WFlZWaUGovz8fOXn51vbhw8fliS5XC65XK6KHm6VUjS+6j7OqoZ595yiOXc6y5573pqKxTnvOXaa+wsdY5UNRDk5OZKkiIgIt/KIiAjt3LnTquPj46OQkJBidYpen5OTo/Dw8GLth4eHW3VKMmHCBI0dO7ZY+ZIlSxQQEHBxg7lCZWZmeroLtsS8e86gQWXP/eLFldQRm+Gc9xw7zP3x48cvqF6VDURFHA6H27YxpljZuc6tU1L987UzevRopaSkWNuHDx9WTEyMEhMTVbNmzQvt/hXJ5XIpMzNTHTp0kNPp9HR3bIN595yiuZ8xo4NcrtLn/pNPKrFTNsA57zl2mvuiKzznU2UDUWRkpKQzKzxRUVFWeW5urrVqFBkZqYKCAh08eNBtlSg3N1etWrWy6uzdu7dY+/v27Su2+nQ2X19f+fr6Fit3Op3V/uQpYqexViXMu+e4XE4VFJQ+97wtlwfnvOfYYe4vdHxV9nOI4uLiFBkZ6bacV1BQoBUrVlhhp3nz5nI6nW51srOztXHjRqtOQkKC8vLytGbNGqvOt99+q7y8PKsOAACwN4+uEB09elQ///yztb19+3Zt2LBBoaGhqlevnkaMGKHx48erYcOGatiwocaPH6+AgAD169dPkhQcHKyBAwdq5MiRql27tkJDQzVq1CjFx8dbT501adJESUlJGjRokKZPny5JevTRR9WlSxeeMAMAAJI8HIjWrVuntm3bWttF9+z0799fs2fP1lNPPaUTJ05oyJAhOnjwoFq0aKElS5YoKCjIes3kyZPl7e2tPn366MSJE2rXrp1mz54tLy8vq87bb7+tJ554wnoarVu3bqV+9hEAALAfjwaiNm3ayBhT6n6Hw6HU1FSlpqaWWsfPz09paWlKS0srtU5oaKjmzZt3KV0FAADVWJW9hwgAAKCyEIgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtEYgAAIDtVelAlJqaKofD4fYTGRlp7TfGKDU1VdHR0fL391ebNm20adMmtzby8/M1bNgwhYWFKTAwUN26ddPu3bsreygAAKAKq9KBSJJuuOEGZWdnWz8//vijtW/ixImaNGmSpk6dqrVr1yoyMlIdOnTQkSNHrDojRozQwoULNX/+fK1cuVJHjx5Vly5dVFhY6InhAACAKsjb0x04H29vb7dVoSLGGE2ZMkXPPvusevXqJUmaM2eOIiIi9M4772jw4MHKy8vTzJkzNXfuXLVv316SNG/ePMXExGjp0qXq2LFjpY4FAABUTVU+EG3dulXR0dHy9fVVixYtNH78eF1zzTXavn27cnJylJiYaNX19fVV69atlZWVpcGDB2v9+vVyuVxudaKjo9W0aVNlZWWVGYjy8/OVn59vbR8+fFiS5HK55HK5LsNIq46i8VX3cVY1zLvnFM2501n23PPWVCzOec+x09xf6BirdCBq0aKF3nrrLTVq1Eh79+7VuHHj1KpVK23atEk5OTmSpIiICLfXREREaOfOnZKknJwc+fj4KCQkpFidoteXZsKECRo7dmyx8iVLliggIOBShnXFyMzM9HQXbIl595xBg8qe+8WLK6kjNsM57zl2mPvjx49fUL0qHYg6depk/Tk+Pl4JCQlq0KCB5syZo5YtW0qSHA6H22uMMcXKznUhdUaPHq2UlBRr+/Dhw4qJiVFiYqJq1qx5sUO5orhcLmVmZqpDhw5yOp2e7o5tMO+eUzT3M2Z0kMtV+tx/8kkldsoGOOc9x05zX3SF53yqdCA6V2BgoOLj47V161b16NFD0plVoKioKKtObm6utWoUGRmpgoICHTx40G2VKDc3V61atSrzWL6+vvL19S1W7nQ6q/3JU8ROY61KmHfPcbmcKigofe55Wy4PznnPscPcX+j4qvxTZmfLz8/X5s2bFRUVpbi4OEVGRrot9xUUFGjFihVW2GnevLmcTqdbnezsbG3cuPG8gQgAANhHlV4hGjVqlLp27ap69eopNzdX48aN0+HDh9W/f385HA6NGDFC48ePV8OGDdWwYUONHz9eAQEB6tevnyQpODhYAwcO1MiRI1W7dm2FhoZq1KhRio+Pt546AwAAqNKBaPfu3frjH/+o33//XXXq1FHLli21evVqxcbGSpKeeuopnThxQkOGDNHBgwfVokULLVmyREFBQVYbkydPlre3t/r06aMTJ06oXbt2mj17try8vDw1LAAAUMVU6UA0f/78Mvc7HA6lpqYqNTW11Dp+fn5KS0tTWlpaBfcOAABUF1fUPUQAAACXA4EIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYHoEIAADYnrenOwAAl1vbtmXv9/GRhgypnL4AqJoIRACuaOcLO5V9rGXLLn8/AFQ8LpkBAADbY4UIgEfYebXFzmMHqioCEQAL/1ADsCsumQEAANtjhQgAqjFW/YALwwoRAACwPVaIgHLgf90AUL3YaoXo9ddfV1xcnPz8/NS8eXN9/fXXnu4SAACoAmyzQrRgwQKNGDFCr7/+uu644w5Nnz5dnTp10n//+1/Vq1fP090DAFwEVmlR0WyzQjRp0iQNHDhQjzzyiJo0aaIpU6YoJiZG06ZN83TXAACAh9kiEBUUFGj9+vVKTEx0K09MTFRWVpaHegUAAKoKW1wy+/3331VYWKiIiAi38oiICOXk5JT4mvz8fOXn51vbeXl5kqQDBw7I5XJVaP/69Dl/nffeq9BDlsnlcun48ePav3+/nE5n5R34CnLVBfxXYv/+i2uzKsz75RjXpRyrXbuKaef8zsy9tF9XXXVpc19Rfa7Mea6oY12sSznnq/K4rgSV/feNJ/+dO3LkiCTJGFNmPVsEoiIOh8Nt2xhTrKzIhAkTNHbs2GLlcXFxl6Vv5xMW5pHD4hJU1/esuo5ryRJP98BdZc5zdX1Pq+u4qqvL/X4dOXJEwcHBpe63RSAKCwuTl5dXsdWg3
NzcYqtGRUaPHq2UlBRr+/Tp0zpw4IBq165daoiqLg4fPqyYmBj9+uuvqlmzpqe7YxvMu+cw957BvHuOnebeGKMjR44oOjq6zHq2CEQ+Pj5q3ry5MjMz1bNnT6s8MzNT3bt3L/E1vr6+8vX1dSurVavW5exmlVOzZs1q/4tSFTHvnsPcewbz7jl2mfuyVoaK2CIQSVJKSoqSk5N16623KiEhQf/85z+1a9cuPfbYY57uGgAA8DDbBKK+fftq//79euGFF5Sdna2mTZtq8eLFio2N9XTXAACAh9kmEEnSkCFDNGTIEE93o8rz9fXVmDFjil0yxOXFvHsOc+8ZzLvnMPfFOcz5nkMDAACo5mzxwYwAAABlIRABAADbIxABAADbIxABAADbIxChVDt27NDAgQMVFxcnf39/NWjQQGPGjFFBQYGnu1btvfTSS2rVqpUCAgJs94Ggle31119XXFyc/Pz81Lx5c3399dee7lK199VXX6lr166Kjo6Ww+HQokWLPN0lW5gwYYJuu+02BQUFKTw8XD169NCWLVs83a0qg0CEUv3f//2fTp8+renTp2vTpk2aPHmy3njjDT3zzDOe7lq1V1BQoN69e+vPf/6zp7tSrS1YsEAjRozQs88+q//85z+666671KlTJ+3atcvTXavWjh07pptuuklTp071dFdsZcWKFRo6dKhWr16tzMxMnTp1SomJiTp27Jinu1Yl8Ng9Lsorr7yiadOm6ZdffvF0V2xh9uzZGjFihA4dOuTprlRLLVq00C233KJp06ZZZU2aNFGPHj00YcIED/bMPhwOhxYuXKgePXp4uiu2s2/fPoWHh2vFihW6++67Pd0dj2OFCBclLy9PoaGhnu4GcMkKCgq0fv16JSYmupUnJiYqKyvLQ70CKk9eXp4k8Xf6/49AhAu2bds2paWl8f1vqBZ+//13FRYWKiIiwq08IiJCOTk5HuoVUDmMMUpJSdGdd96ppk2bero7VQKByIZSU1PlcDjK/Fm3bp3ba/bs2aOkpCT17t1bjzzyiId6fmUrz7zj8nM4HG7bxphiZUB18/jjj+uHH37Qu+++6+muVBm2+i4znPH444/r/vvvL7NO/fr1rT/v2bNHbdu2VUJCgv75z39e5t5VXxc777i8wsLC5OXlVWw1KDc3t9iqEVCdDBs2TB999JG++uor1a1b19PdqTIIRDYUFhamsLCwC6r722+/qW3btmrevLnS09N11VUsKpbXxcw7Lj8fHx81b95cmZmZ6tmzp1WemZmp7t27e7BnwOVhjNGwYcO0cOFCLV++XHFxcZ7uUpVCIEKp9uzZozZt2qhevXp69dVXtW/fPmtfZGSkB3tW/e3atUsHDhzQrl27VFhYqA0bNkiSrr32WtWoUcOznatGUlJSlJycrFtvvdVaAd21axf3yV1mR48e1c8//2xtb9++XRs2bFBoaKjq1avnwZ5Vb0OHDtU777yjDz/8UEFBQdbqaHBwsPz9/T3cO8/jsXuUavbs2XrooYdK3Mdpc3kNGDBAc+bMKVa+bNkytWnTpvI7VI29/vrrmjhxorKzs9W0aVNNnjyZR5Avs+XLl6tt27bFyvv376/Zs2dXfodsorR749LT0zVgwIDK7UwVRCACAAC2xw0hAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAKo8h8OhRYsWWdv/93//p5YtW8rPz08333xzqWUAKt9XX32lrl27Kjo6utjv7uVQv379Er8se+jQoRfVDoEIgMcMGDDA+svL6XQqIiJCHTp00KxZs3T69GmrXnZ2tjp16mRtjxkzRoGBgdqyZYu++OKLUssAVL5jx47ppptu0tSpUyvleGvXrlV2drb1k5mZKUnq3bv3RbVDIALgUUlJScrOztaOHTv02WefqW3btho+fLi6dOmiU6dOSTrz3Xm+vr7Wa7Zt26Y777xTsbGxql27dqllF6ugoODSBwTYXKdOnTRu3Dj16tWrxP0FBQV66qmndPXVVyswMFAtWrTQ8uXLy328OnXqKDIy0vr55JNP1KBBA7Vu3fqi2iEQAfAoX19fRUZG6uqrr9Ytt9yiZ555Rh9++KE+++wz63utzl52dzgcWr9+vV544QU5HA6lpqaWWCZJv/32m/r27auQkBDVrl1b3bt3144dO6xjDxgwQD169NCECRMUHR2tRo0aXdTrXn31VUVFRal27doaOnSoXC6XVSc/P19PPfWUYmJi5Ovrq4YNG2rmzJnW/v/+97/q3LmzatSooYiICCUnJ+v333+/LHMMVCUPPfSQvvnmG82fP18//PCDevfuraSkJG3duvWS2y4oKNC8efP08MMPl/rdbaUhEAGocu655x7ddNNN+uCDD4rty87O1g033KCRI0cqOztbo0aNKrHs+PHjatu2rWrUqKGvvvpKK1euVI0aNZSUlOS2EvTFF19o8+bNyszM1CeffHLBr1u2bJm2bdumZcuWac6cOZo9e7bbF5M++OCDmj9/vv7+979r8+bNeuONN1SjRg1rDK1bt9bNN9+sdevWKSMjQ3v37lWfPn0u36QCVcC2bdv07rvv6v3339ddd92lBg0aaNSoUbrzzjuVnp5+ye0vWrRIhw4dKteX1Xpf8tEB4DK47rrr9MMPPxQrj4yMlLe3t2rUqKHIyEhJUo0aNYqVzZo1S1dddZXefPNN63+K6enpqlWrlpYvX67ExERJUmBgoN588035+Phc1OtCQkI0depUeXl56brrrtO9996rL774QoMGDdJPP/2k9957T5mZmWrfvr0k6ZprrrHGMG3aNN1yyy0aP368VTZr1izFxMTop59+slaqgOrmu+++kzGm2Dmen59vXeresWOH4uLiymxn6NChJd6jNHPmTHXq1EnR0dEX3TcCEYAqyRhz0UveZ1u/fr1+/vlnBQUFuZWfPHlS27Zts7bj4+OtMHQxr7vhhhvk5eVlbUdFRenHH3+UJG3YsEFeXl6l3sOwfv16LVu2zFoxOtu2bdsIRKi2Tp8+LS8vL61fv97t90eS9ftw9dVXa/PmzWW2ExISUqxs586dWrp0aYkryxeCQASgStq8efN5/5dYltOnT6t58+Z6++23i+2rU6eO9efAwMByvc7pdLrtczgc1pNx/v7+5+1b165d9fLLLxfbFxUVVeZrgStZs2bNVFhYqNzcXN11110l1nE6nbruuusuuu309HSFh4fr3nvvLVffCEQAqpwvv/xSP/74o5588slyt3HLLbdowYIFCg8PV82aNS/7684WHx+v06dPa8WKFdYls3OP8e9//1v169eXtzd/DaN6OXr0qH7++Wdre/v27dqwYYNCQ0PVqFEjPfDAA3rwwQf12muvqVmzZvr999/15ZdfKj4+Xp07dy7XMU+fPq309HT179+/3L9T3FQNwKPy8/OVk5Oj3377Td99953Gjx+v7t27q0uXLnrwwQfL3e4DDzygsLAwde/eXV9//bW2b9+uFStWaPjw4dq9e3eFv+5s9evXV//+/fXwww9r0aJF2r59u5Yv
X6733ntP0pn7Hw4cOKA//vGPWrNmjX755RctWbJEDz/8sAoLC8s9ZqAqWLdunZo1a6ZmzZpJklJSUtSsWTM9//zzks6s5Dz44IMaOXKkGjdurG7duunbb79VTExMuY+5dOlS7dq1Sw8//HC52+C/JgA8KiMjQ1FRUfL29lZISIhuuukm/f3vf1f//v111VXl/z9bQECAvvrqKz399NPq1auXjhw5oquvvlrt2rUrc+WnvK8717Rp0/TMM89oyJAh2r9/v+rVq6dnnnlGkhQdHa1vvvlGTz/9tDp27Kj8/HzFxsYqKSnpksYMVAVt2rSRMabU/U6nU2PHjtXYsWMr7JiJiYllHvNCOMyltgAAAHCF478iAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9ghEAADA9v4/1PkCnvC0QMYAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "differences = hf_model.model.layers[0](test_tensor)[0] - tl_model.blocks[0](test_tensor)\n", + "\n", + "# Flatten the differences to create a one-dimensional tensor\n", + "flattened_differences = differences.flatten().cpu().detach().numpy()\n", + "\n", + "# Plot the histogram of the differences\n", + "plt.hist(flattened_differences, bins=50, alpha=0.75, color='blue')\n", + "plt.title('Differences Between Layer Outputs')\n", + "plt.xlabel('Difference')\n", + "plt.ylabel('Frequency')\n", + "plt.grid(True)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "e05fb0b6-b05d-4651-a976-2772a4177a0a", + "metadata": {}, + "source": [ + "## Compare MLP Outputs" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "44a55507-e639-414a-a297-e68e1c0696f9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor(True)" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.all(\n", + " tl_model.blocks[0].mlp.experts[0].W_in ==\n", + " hf_model.model.layers[0].block_sparse_moe.experts[0].w3.weight.T\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "03944deb-aa8d-46ff-83dd-4f7ee955656c", + "metadata": {}, + "outputs": [], + "source": [ + "test_tensor = torch.randn((1, 1, 4096,))" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "eb0109ee-b82a-4ea0-b50b-8e6408647cea", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor(False)" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.all(\n", + " hf_model.model.layers[0].block_sparse_moe(test_tensor)[0] ==\n", + " tl_model.blocks[0].mlp(test_tensor)\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "25ce75bf-706e-4ae8-8f74-bc9c40e88c25", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[ 0.4624, -0.3203, 0.3846, ..., 0.5780, 0.2270, 0.3475]]],\n", + " grad_fn=)" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "hf_model.model.layers[0].block_sparse_moe(test_tensor)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "c016430e-0a30-426b-bfd0-0b1b423b3ff6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[ 0.4624, -0.3203, 0.3846, ..., 0.5780, 0.2270, 0.3475]]],\n", + " grad_fn=)" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tl_model.blocks[0].mlp(test_tensor)" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "46353486-0a3f-4241-9cf5-ed25c7539f71", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([1, 1, 4096])" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tl_model.blocks[0].mlp(test_tensor).shape" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "e25ada54-4e3c-42b7-8f35-ba67bfa500e3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[False, False, False, ..., False, False, False]]])" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "hf_model.model.layers[0].block_sparse_moe(test_tensor)[0] == tl_model.blocks[0].mlp(test_tensor)" + ] + }, + { + "cell_type": "code", + 
"execution_count": 33, + "id": "8f3a2865-645d-4441-95fb-32446f866760", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor(201)" + ] + }, + "execution_count": 33, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.sum(hf_model.model.layers[0].block_sparse_moe(test_tensor)[0] == tl_model.blocks[0].mlp(test_tensor))" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "id": "c6ef1f5e-bdf0-45e5-9347-6972e91e2f2f", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjsAAAHFCAYAAAAUpjivAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAABMDklEQVR4nO3deViU5f4/8PcIw7AICCIMJCKHQFTIFEvFElHAJVxPobkbeenRTFy+pi1HLA+mpVmamoWgGWGpWLmjomZQKmrmkluISyDHDXAbRrh/f/jjOY5swzAww+P7dV1cNc/ccz+fzzwz+OZZZhRCCAEiIiIimWpg6gKIiIiIahPDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMO1TuJiYlQKBTSj7W1NdRqNUJDQzF37lzk5eWVeUxsbCwUCoXOsqKiIowbNw7u7u6wsLDAs88+CwC4ceMGBg8eDFdXVygUCvTv378Ouqq/Lly4oLM9FAoFHBwc0KZNGyxatAjFxcUGzbtlyxbExsYat1gzU/p8jRo1qtz733//fWnMhQsXpOWjRo1Cw4YNK5378feJpaUlmjZtitGjR+PKlSt61Xf9+nXMnDkTrVq1gq2tLRwcHNCxY0d8/vnn0Gq1+rZZRl1v26VLlyIxMbHO1kdmSBDVMwkJCQKASEhIEBkZGWLfvn1i3bp1IiYmRjg6OgpnZ2eRmpqq85hLly6JjIwMnWWLFi0SAMTixYtFenq6OHbsmBBCiJiYGGFlZSXWrFkjMjIyxOnTp+ust/ooKytLABATJ04UGRkZIiMjQ2zdulX861//EgDElClTDJp3woQJQu6/ogAIe3t7YWtrKwoKCnTuKykpEd7e3sLBwUEAEFlZWdJ9I0eOFHZ2dpXO/fj7ZPfu3SI2NlaoVCrh7e0tbt++XenjT506JTw9PYWTk5OYM2eO2L17t9i8ebP417/+JSwsLERISIi4c+eOQX3X9bZt3bq1CAkJqbP1kfmR928SkqXSX+IHDx4sc192drbw9PQU9vb2Ijc3t9J5Xn/9dWFjY1NmeVhYmGjZsqXR6hVCiLt37xp1PnNSGnY++uijMve9+OKLwt3d3aB5n5SwM2zYMGFjYyNWrFihc9/OnTsFADFmzJgahZ3H3yfvvfeeACDWrFlT4WMfPHggWrVqJRwdHcsN+8nJyQKAGDt2rB5dlsWwQ3WNh7FIVpo1a4YFCxagsLAQX3zxhbT88cNYCoUCX331Fe7duyft5i/d7b9z506cOnVKWr5nzx4ADw97zZkzB/7+/lCpVGjSpAlGjx6N//73vzo1NG/eHJGRkdiwYQPatm0La2trzJ49GwCQm5uLsWPHomnTprCysoK3tzdmz56NBw8eSI8vPSz08ccfY+HChfD29kbDhg3RqVMn/Prrr2V6/u2339CnTx80btwY1tbW8PHxQUxMjM6Ys2fPYsiQIXB1dYVKpULLli3x+eef64wpKSnBnDlz0KJFC9jY2KBRo0Z45pln8Omnnxq0LQDA0dERSqWyzPK1a9eiU6dOsLOzQ8OGDdGjRw8cOXJEun/UqFFSfY8eirlw4QJeeeUVtG7dWme+Pn36QKFQ4Pvvv5eWHT58GAqFAj/99JO0TJ/nH6j+tt62bRvatWsHGxsb+Pv7Y+XKldV6jgYMGFDmMStXrkTnzp3h5+en91z66NixIwAgOzu7wjEpKSk4efIkZsyYUe76Bw0ahIiICMTHxyM3NxcAsGfPHp33S6nS13PpYaTKtm3psjfeeANffPEF/Pz8oFKp0KpVKyQnJ+vMW96haeB/h+9K52vevDlOnDiBvXv3Sutq3rw5gNp5zZN5sjR1AUTG1rt3b1hYWGDfvn0VjsnIyMAHH3yAtLQ07N69GwDg7e2NjIwMjB8/Hvn5+fjmm28AAK1atUJJSQn69euHn3/+GdOnT0dwcDCys7Mxa9YsdO3aFYcOHYKNjY00/+HDh3Hq1Cm8++678Pb2hp2dHXJzc/H888+jQYMG+Pe//w0fHx9kZGRgzpw5uHDhAhISEnRq/Pzzz+Hv749FixYBAN577z307t0bWVlZcHR0BABs374dffr0QcuWLbFw4UI0a9YMFy5cwI4dO6R5Tp48ieDgYCkIqtVqbN++HW+++SauXbuGWbNmAQDmz5+P2NhYvPvuu+jSpQu0Wi3+/PNP3Lp1S6/nvaSkRAoN+fn5+OGHH7Bt2za89dZbOuPi4uLw7rvvYvTo0Xj33XdRVFSEjz76CC+++CIOHDiAVq1a4b333sOdO3ewbt06ZGRkSI91d3dHWFgY1q1bh5ycHLi7u+PBgwfYu3cvbGxskJqaildeeQUAsHPnTlhaWqJr164AoPfzX91t/fvvv2Pq1KmYMWMG3Nzc8NVXXyE6OhpPP/00unTpotdzFx0dje7du+PUqVNo2bIlbt26hQ0bNmDp0qW4fv26XnPo69y5cwCAJk2aVDgmNTUVACo9X61///7YsWMH9uzZg8GDB+u9/sq2bakff/wRaWlpeP/992FnZ4elS5fi1VdfhaWlJV5++WW91wU8DG4vv/wyHB0dsXTpUgCASqUCUPPXPNUjpt61RFRdlR3GKuXm5qZzKGrWrFlldptXdCggJCREtG7dWmfZt99+KwCI9evX6yw/ePCgACCWLl0qLfPy8hIWFhZldv+PHTtWNGzYUGRnZ+ss//jjjwUAceLECSHE/w4LBQYGigcPHkjjDhw4IACIb7/9Vlrm4+MjfHx8xL179yp8Lnr06CGaNm0q8vPzdZa/8cYbwtraWty4cUMIIURkZKR49tlnK5ynIqX1lvczatQonR4uXrwoLC0txcSJE3XmKCwsFGq1WkRFRUnLKjrUce7cOQFArF69WgghxP79+wUAMX36dOHt7S2NCw8PF8HBwdJtfZ//6m5ra2trnTnv3bsnnJ2d9TrEA0BMmDBBOj9n2rRpQgghPv/8c9GwYU
NRWFgoPvrooxodxvr111+FVqsVhYWFYtOmTaJJkyZVHubt2bOnACDu379f4ZitW7cKAGLevHlCCCHS0tIEAJGWlqYzrvT1kZCQIC2r7DAWAGFjY6NT34MHD4S/v794+umnpWXlvacf7fvR56uiw1iGvuap/uFhLJIlIYRR59u0aRMaNWqEPn364MGDB9LPs88+C7VaXWbX/TPPPFNm9/+mTZsQGhoKDw8PnTl69eoFANi7d6/O+JdeegkWFhY6cwL/O/xw5swZnD9/HtHR0bC2ti637vv372PXrl0YMGAAbG1tddbbu3dv3L9/Xzo09vzzz+P333/H+PHjsX37dhQUFFTrOZo0aRIOHjyIgwcPIi0tDXFxcfjuu+/w6quvSmO2b9+OBw8eYMSIETq1WFtbIyQkpMzzWB4fHx80b94cO3fuBPBwL0RgYCCGDRuGrKwsnD9/HhqNBvv370dYWJj0OH2f/+pu62effRbNmjWTbltbW8PPz6/Sw0SPK70i6+uvv8aDBw8QHx+PqKioKq+40kfHjh2hVCphb2+PyMhIqNVqbN26FW5ubjWat/Q9Vt6hpJrq3r27Tn0WFhYYNGgQzp07h8uXLxttPTV9zVP9wcNYJDt37tzB9evXERgYaLQ5r169ilu3bsHKyqrc+69du6Zz+9Fd8o/O8dNPP5V7Dkt5czRu3Fjndumu93v37gGAdP5I06ZNK6z7+vXrePDgARYvXozFixdXut6ZM2fCzs4Oa9aswfLly2FhYYEuXbpg3rx5aN++fYXrKNW0aVOdcV27doVCocDMmTOxfft29OjRA1evXgUAPPfcc+XO0aCBfn9/de/eHdu2bQPw8HBVeHg4AgMD4ebmhp07d8LX1xf37t3TCTv6Pv/V3daPbyfg4bYq3U76Gj16NGbPno24uDgcPny4wu1VXatXr0bLli1haWkJNze3cl+bjysNb1lZWfD39y93TOk5MZ6enkap81FqtbrCZdevX6/0NV8dNX3NU/3BsEOys3nzZhQXF0vnahiDi4sLGjduLP0D+zh7e3ud2+X9tevi4oJnnnkG//nPf8qdw8PDo1o1lZ5zUdlfuk5OTrCwsMDw4cMxYcKEcsd4e3sDACwtLTFlyhRMmTIFt27dws6dO/H222+jR48euHTpEmxtbatVH/C/vVG///47evToARcXFwDAunXr4OXlVe35SnXv3h3x8fE4cOAAfvvtN7z77rsAgG7duiE1NRXZ2dlo2LChdDIuoP/zX91tbSyenp4ICwvD7Nmz0aJFCwQHBxtl3pYtW1b7H+7w8HCsWLECGzduxIwZM8ods3HjRp1zokr3Lmo0Gp1xj4dDfZSe9FzestJw+ej6Sv8QqO76auM1T+aJYYdk5eLFi5g2bRocHR0xduxYo80bGRmJ5ORkFBcXo0OHDgbPsWXLFvj4+MDJyanGNfn5+cHHxwcrV67ElClTdH7hl7K1tUVoaCiOHDmCZ555psK9FY9r1KgRXn75ZVy5cgUxMTG4cOECWrVqVe0ajx49CgBwdXUFAPTo0QOWlpY4f/48/vnPf1b62Ef3ZD16QjDwMOwoFAq89957aNCggXQicFhYGP7v//4P2dnZ6NKli85eHH2ff2Nsa0NNnToVNjY20knWpjJgwAC0atUKH374IQYOHFjmkOzatWuxY8cOjBs3TtrjUnqF07Fjx9CjRw9p7I8//lhm/sq2LQDs2rULV69elQ5lFRcXY+3atfDx8ZH26jy6vkf3FD569d2j66tqT5uxXvNknhh2qN46fvy4dD5FXl4efv75ZyQkJMDCwgIpKSmVXm1SXYMHD8Y333yD3r17Y9KkSXj++eehVCpx+fJlpKWloV+/fhgwYEClc7z//vtITU1FcHAw3nzzTbRo0QL379/HhQsXsGXLFixfvrzau+c///xz9OnTBx07dsTkyZPRrFkzXLx4Edu3b5euJvv000/xwgsv4MUXX8S//vUvNG/eHIWFhTh37hx++ukn6Wq0Pn36ICAgAO3bt0eTJk2QnZ2NRYsWwcvLC76+vlXWcvHiRen8nzt37iAjIwNz586Fl5cXBg4cCODhP1Dvv/8+3nnnHfz111/o2bMnnJyccPXqVRw4cAB2dnbSZfqlhyHnzZuHXr16wcLCQgpsrq6uCAgIwI4dOxAaGir9BR4WFoYbN27gxo0bWLhwoUHPvzG2taEiIiIQERGh19ji4mKsW7euzHI7OzvpPCRDWVhYYP369QgPD0enTp0wdepUdOrUCRqNBj/99BNWrFiBkJAQLFiwQHqMWq1GWFgY5s6dCycnJ3h5eWHXrl3YsGFDmfkr27bAw71r3bp1w3vvvSddjfXnn3/qXH7eu3dvODs7Izo6Gu+//z4sLS2RmJiIS5culbu+5ORkrF27Fv/4xz9gbW2NwMDAGr/mqR4x9RnSRNVVerVF6Y+VlZVwdXUVISEhIi4uTuTl5ZV5TE2vxhJCCK1WKz7++GPRpk0bYW1tLRo2bCj8/f3F2LFjxdmzZ6VxXl5e4qWXXiq39v/+97/izTffFN7e3kKpVApnZ2cRFBQk3nnnHekTbSv7kD4AYtasWTrLMjIyRK9evYSjo6NQqVTCx8dHTJ48WWdMVlaWeO2118RTTz0llEqlaNKkiQgODhZz5syRxixYsEAEBwcLFxcXYWVlJZo1ayaio6PFhQsXyu3l0bnx2FVY1tbWws/PT8TExIicnJwyj9m4caMIDQ0VDg4OQqVSCS8vL/Hyyy+LnTt3SmM0Go14/fXXRZMmTYRCoShzhc3kyZMFAPGf//xHZ25fX18BQPpE7Efp8/wLUfNtHRISoteH2OH/X41VmYquxnr8OS/98fLyEkLod9ViVa5duyZmzJgh/P39pefh+eefF0uWLBFFRUVlxufk5IiXX35ZODs7C0dHRzFs2DBx6NChMldjVbZtS5+TpUuXCh8fH6FUKoW/v7/45ptvyqzvwIEDIjg4WNjZ2YmnnnpKzJo1S3z11Vdlnq8LFy6IiIgIYW9vr/McGfqap/pHIYSRL1shIiIykEKhwIQJE7BkyRJTl0IywkvPiYiISNYYdoiIiEjWeIIyERGZDZ5ZQbWBe3aIiIhI1hh2iIiISNYYdoiIiEjWeM4OgJKSEvz999+wt7evlS+1IyIiIuMTQqCwsBAeHh6Vfrceww6Av//+u1a+zI6IiIhq36VLlyr9BHqGHfzvi/0uXboEBwcHE1dTMa1Wix07diAiIqLCb26WA/YpP09Kr+xTXtin+SsoKICnp2eVX9DLsIP/fUO1g4OD2YcdW1tbODg41LsXZHWwT/l5Unpln/LCPuuPqk5B4QnKREREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGsMO0RERCRrDDtEREQka5amLoCIqCKhoVWPSUur/TqIqH7jnh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1h
h0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWThp1ly5bhmWeegYODAxwcHNCpUyds3bpVul8IgdjYWHh4eMDGxgZdu3bFiRMndObQaDSYOHEiXFxcYGdnh759++Ly5ct13QoRERGZKZOGnaZNm+LDDz/EoUOHcOjQIXTr1g39+vWTAs38+fOxcOFCLFmyBAcPHoRarUZ4eDgKCwulOWJiYpCSkoLk5GTs378ft2/fRmRkJIqLi03VFhEREZkRk4adPn36oHfv3vDz84Ofnx/+85//oGHDhvj1118hhMCiRYvwzjvvYODAgQgICMCqVatw9+5dJCUlAQDy8/MRHx+PBQsWICwsDG3btsWaNWvwxx9/YOfOnaZsjYiIiMyEpakLKFVcXIzvv/8ed+7cQadOnZCVlYXc3FxERERIY1QqFUJCQpCeno6xY8ciMzMTWq1WZ4yHhwcCAgKQnp6OHj16lLsujUYDjUYj3S4oKAAAaLVaaLXaWuqw5kprM+cajYF9yo+hvVpZ6TO3IRXVjidlm7JPeanPfepbs8nDzh9//IFOnTrh/v37aNiwIVJSUtCqVSukp6cDANzc3HTGu7m5ITs7GwCQm5sLKysrODk5lRmTm5tb4Trnzp2L2bNnl1m+Y8cO2Nra1rSlWpeammrqEuoE+5Sf6vY6fnzVY7ZsMbCYWvSkbFP2KS/1sc+7d+/qNc7kYadFixY4evQobt26hfXr12PkyJHYu3evdL9CodAZL4Qos+xxVY2ZOXMmpkyZIt0uKCiAp6cnIiIi4ODgYGAntU+r1SI1NRXh4eFQKpWmLqfWsE/5MbTXyMiqx2zaVIPCjOxJ2absU17qc5+lR2aqYvKwY2VlhaeffhoA0L59exw8eBCffvop3nrrLQAP9964u7tL4/Py8qS9PWq1GkVFRbh586bO3p28vDwEBwdXuE6VSgWVSlVmuVKprBcbur7UWVPss/4KDdW9bWX1cC/NgAFKFBU97DUtrep5ioqqHmOOT50ct2l52Ke81Mc+9a3X7D5nRwgBjUYDb29vqNVqnd1qRUVF2Lt3rxRkgoKCoFQqdcbk5OTg+PHjlYYdIiIienKYdM/O22+/jV69esHT0xOFhYVITk7Gnj17sG3bNigUCsTExCAuLg6+vr7w9fVFXFwcbG1tMWTIEACAo6MjoqOjMXXqVDRu3BjOzs6YNm0aAgMDERYWZsrWiIiIyEyYNOxcvXoVw4cPR05ODhwdHfHMM89g27ZtCA8PBwBMnz4d9+7dw/jx43Hz5k106NABO3bsgL29vTTHJ598AktLS0RFReHevXvo3r07EhMTYWFhYaq2iIiIyIyYNOzEx8dXer9CoUBsbCxiY2MrHGNtbY3Fixdj8eLFRq6OiIiI5MDsztkhIiIiMiaGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNUtTF0BE9UtoqKkrICKqHu7ZISIiIllj2CEiIiJZY9ghIiIiWWPYISIiIllj2CEiIiJZY9ghIiIiWeOl50RmTp9LvdPSar8OIqL6int2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNZMGnbmzp2L5557Dvb29nB1dUX//v1x+vRpnTGjRo2CQqHQ+enYsaPOGI1Gg4kTJ8LFxQV2dnbo27cvLl++XJetEBERkZkyadjZu3cvJkyYgF9//RWpqal48OABIiIicOfOHZ1xPXv2RE5OjvSzZcsWnftjYmKQkpKC5ORk7N+/H7dv30ZkZCSKi4vrsh0iIiIyQyb9ItBt27bp3E5ISICrqysyMzPRpUsXablKpYJarS53jvz8fMTHx+Prr79GWFgYAGDNmjXw9PTEzp070aNHj9prgIiIiMyeWZ2zk5+fDwBwdnbWWb5nzx64urrCz88PY8aMQV5ennRfZmYmtFotIiIipGUeHh4ICAhAenp63RROREREZsuke3YeJYTAlClT8MILLyAgIEBa3qtXL7zyyivw8vJCVlYW3nvvPXTr1g2ZmZlQqVTIzc2FlZUVnJycdOZzc3NDbm5uuevSaDTQaDTS7YKCAgCAVquFVquthe6Mo7Q2c67RGNinLisrfeYyRkX60aeexymVWp3/AvrVbG69V4WvXXlhn+ZP35oVQghRy7XoZcKECdi8eTP279+Ppk2bVjguJycHXl5eSE5OxsCBA5GUlITRo0frhBcACA8Ph4+PD5YvX15mjtjYWMyePbvM8qSkJNja2ta8GSIiIqp1d+/exZAhQ5Cfnw8HB4cKx5nFnp2JEyfixx9/xL59+yoNOgDg7u4OLy8vnD17FgCgVqtRVFSEmzdv6uzdycvLQ3BwcLlzzJw5E1OmTJFuFxQUwNPTExEREZU+Waam1WqRmpqK8PBwKJVKU5dTa9inrsjIqufatMmIhVVBn3oep1RqMWZMKr78Mhxa7cNe9am5Lns3xrr42pUX9mn+So/MVMWkYUcIgYkTJyIlJQV79uyBt7d3lY+5fv06Ll26BHd3dwBAUFAQlEolUlNTERUVBeDh3p/jx49j/vz55c6hUqmgUqnKLFcqlfViQ9eXOmuKfT5UVKTPHEYsqAr61FMRrVaJoqKHxepTc132bsx18bUrL+zTfOlbr0nDzoQJE5CUlIQffvgB9vb20jk2jo6OsLGxwe3btxEbG4t//vOfcHd3x4ULF/D222/DxcUFAwYMkMZGR0dj6tSpaNy4MZydnTFt2jQEBgZKV2cRERHRk8ukYWfZsmUAgK5du+osT0hIwKhRo2BhYYE//vgDq1evxq1bt+Du7o7Q0FCsXbsW9vb20vhPPvkElpaWiIqKwr1799C9e3ckJibCwsKiLtshIiIiM2Tyw1iVsbGxwfbt26ucx9raGosXL8bixYuNVRoRERHJhFl9zg4RERGRsTHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkaxZmroAIqoboaFVj0lLq/06iIjqGvfsEBERkawx
7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkayZNOzMnTsXzz33HOzt7eHq6or+/fvj9OnTOmOEEIiNjYWHhwdsbGzQtWtXnDhxQmeMRqPBxIkT4eLiAjs7O/Tt2xeXL1+uy1aIiIjITJk07OzduxcTJkzAr7/+itTUVDx48AARERG4c+eONGb+/PlYuHAhlixZgoMHD0KtViM8PByFhYXSmJiYGKSkpCA5ORn79+/H7du3ERkZieLiYlO0RURERGbE0pQr37Ztm87thIQEuLq6IjMzE126dIEQAosWLcI777yDgQMHAgBWrVoFNzc3JCUlYezYscjPz0d8fDy+/vprhIWFAQDWrFkDT09P7Ny5Ez169KjzvoiIiMh8GLRnJysry9h1AADy8/MBAM7OztJ6cnNzERERIY1RqVQICQlBeno6ACAzMxNarVZnjIeHBwICAqQxRERE9OQyaM/O008/jS5duiA6Ohovv/wyrK2ta1yIEAJTpkzBCy+8gICAAABAbm4uAMDNzU1nrJubG7Kzs6UxVlZWcHJyKjOm9PGP02g00Gg00u2CggIAgFarhVarrXEvtaW0NnOu0RjYpy4rK33mqnpMXc7zOKVSq/NfY67LWC8TY6yLr115YZ/mT9+aFUIIUd3Jjx8/jpUrV+Kbb76BRqPBoEGDEB0djeeff77ahZaaMGECNm/ejP3796Np06YAgPT0dHTu3Bl///033N3dpbFjxozBpUuXsG3bNiQlJWH06NE64QUAwsPD4ePjg+XLl5dZV2xsLGbPnl1meVJSEmxtbQ3ugYiIiOrO3bt3MWTIEOTn58PBwaHCcQbt2QkICMDChQsxf/58/PTTT0hMTMQLL7wAX19fREdHY/jw4WjSpIne802cOBE//vgj9u3bJwUdAFCr1QAe7r15NOzk5eVJe3vUajWKiopw8+ZNnb07eXl5CA4OLnd9M2fOxJQpU6TbBQUF8PT0RERERKVPlqlptVqkpqYiPDwcSqXS1OXUGvapKzKy6rk2bap6TF3O8zilUosxY1Lx5Zfh0GqVRl2XPvPowxjr4mtXXtin+Ss9MlOVGp2gbGlpiQEDBqB3795YunQpZs6ciWnTpmHmzJkYNGgQ5s2bpxNSHieEwMSJE5GSkoI9e/bA29tb535vb2+o1Wqkpqaibdu2AICioiLs3bsX8+bNAwAEBQVBqVQiNTUVUVFRAICcnBwcP34c8+fPL3e9KpUKKpWqzHKlUlkvNnR9qbOm2OdDRUX6zFH1mLqcpyJarRJFRUqjrstYLxFjrouvXXlhn+ZL33prdOn5oUOHMH78eLi7u2PhwoWYNm0azp8/j927d+PKlSvo169fpY+fMGEC1qxZg6SkJNjb2yM3Nxe5ubm4d+8eAEChUCAmJgZxcXFISUnB8ePHMWrUKNja2mLIkCEAAEdHR0RHR2Pq1KnYtWsXjhw5gmHDhiEwMFC6OouIiIieXAbt2Vm4cCESEhJw+vRp9O7dG6tXr0bv3r3RoMHD7OTt7Y0vvvgC/v7+lc6zbNkyAEDXrl11lickJGDUqFEAgOnTp+PevXsYP348bt68iQ4dOmDHjh2wt7eXxn/yySewtLREVFQU7t27h+7duyMxMREWFhaGtEdEREQyYlDYWbZsGV577TWMHj1aOq/mcc2aNUN8fHyl8+hzbrRCoUBsbCxiY2MrHGNtbY3Fixdj8eLFVc5HRERETxaDws7Zs2erHGNlZYWRI0caMj0RERGR0Rh0zk5CQgK+//77Msu///57rFq1qsZFERERERmLQWHnww8/hIuLS5nlrq6uiIuLq3FRRERERMZiUNjJzs4uc5k4AHh5eeHixYs1LoqIiIjIWAwKO66urjh27FiZ5b///jsaN25c46KIiIiIjMWgsDN48GC8+eabSEtLQ3FxMYqLi7F7925MmjQJgwcPNnaNRERERAYz6GqsOXPmIDs7G927d4el5cMpSkpKMGLECJ6zQ0R6CQ01dQVE9KQwKOxYWVlh7dq1+OCDD/D777/DxsYGgYGB8PLyMnZ9RERERDVSo+/G8vPzg5+fn7FqISIiIjI6g8JOcXExEhMTsWvXLuTl5aGkpETn/t27dxulOCKqWzy0RERyZFDYmTRpEhITE/HSSy8hICAACoXC2HURERERGYVBYSc5ORnfffcdevfubex6iIiqRZ+9UWlptV8HEZkvgy49t7KywtNPP23sWoiIiIiMzqCwM3XqVHz66ad6fWs5ERERkSkZdBhr//79SEtLw9atW9G6dWsolUqd+zds2GCU4oiIiIhqyqCw06hRIwwYMMDYtRAREREZnUFhJyEhwdh1EBEREdUKg87ZAYAHDx5g586d+OKLL1BYWAgA+Pvvv3H79m2jFUdERERUUwbt2cnOzkbPnj1x8eJFaDQahIeHw97eHvPnz8f9+/exfPlyY9dJREREZBCD9uxMmjQJ7du3x82bN2FjYyMtHzBgAHbt2mW04oiIiIhqyuCrsX755RdYWVnpLPfy8sKVK1eMUhgRERGRMRi0Z6ekpATFxcVlll++fBn29vY1LoqIiIjIWAwKO+Hh4Vi0aJF0W6FQ4Pbt25g1axa/QoKIiIjMikGHsT755BOEhoaiVatWuH//PoYMGYKzZ8/CxcUF3377rbFrJCIiIjKYQWHHw8MDR48exbfffovDhw+jpKQE0dHRGDp0qM4Jy0RERESmZlDYAQAbGxu89tpreO2114xZDxEREZFRGRR2Vq9eXen9I0aMMKgYIiJzFhpa+f1WVsD48XVTCxHpz6CwM2nSJJ3bWq0Wd+/ehZWVFWxtbRl2iIiIyGwYdDXWzZs3dX5u376N06dP44UXXuAJykRERGRWDP5urMf5+vriww8/LLPXh4iIiMiUDD5BuTwWFhb4+++/jTklkexFRgJFRaaugohIvgwKOz/++KPObSEEcnJysGTJEnTu3NkohREREREZg0Fhp3///jq3FQoFmjRpgm7dumHBggXGqIuIiIjIKAwKOyUlJcaug4iIiKhWGO0EZSIiIiJzZNCenSlTpug9duHChYasgoiIiMgoDAo7R44cweHDh/HgwQO0aNECAHDmzBlYWFigXbt20jiFQmGcKomIiIgMZFDY6dOnD+zt7bFq1So4OTkBePhBg6NHj8aLL76IqVOnGrVIIiIiIkMZdM7OggULMHfuXCnoAICTkxPmzJnDq7GIiIjIrBgUdgoKCnD16tUyy/Py8lBYWFjjooiIiIiMxaCwM2DAAIwePRrr1q3D5cuXcfnyZaxbtw7R0dEYOHCgsWskIiIiMphB5+wsX74c06ZNw7Bhw6DVah9OZGmJ6OhofPTRR0YtkIiopkJDTV0BEZmSQWHH1tYWS5cuxUcffYTz589DCIGnn34adnZ2xq6PiIiIqEZq9KGCOTk5yMnJgZ+fH+zs7CCEqNbj9+3bhz59+sDDwwMKhQIbN27UuX/UqFFQKBQ6Px07dtQ
Zo9FoMHHiRLi4uMDOzg59+/bF5cuXa9IWUZVCQ6v+ISIi82BQ2Ll+/Tq6d+8OPz8/9O7dGzk5OQCA119/vVqXnd+5cwdt2rTBkiVLKhzTs2dPKVTl5ORgy5YtOvfHxMQgJSUFycnJ2L9/P27fvo3IyEgUFxcb0hoRERHJjEGHsSZPngylUomLFy+iZcuW0vJBgwZh8uTJel9+3qtXL/Tq1avSMSqVCmq1utz78vPzER8fj6+//hphYWEAgDVr1sDT0xM7d+5Ejx499OyIiIiI5MqgPTs7duzAvHnz0LRpU53lvr6+yM7ONkphpfbs2QNXV1f4+flhzJgxyMvLk+7LzMyEVqtFRESEtMzDwwMBAQFIT083ah1ERERUPxm0Z+fOnTuwtbUts/zatWtQqVQ1LqpUr1698Morr8DLywtZWVl477330K1bN2RmZkKlUiE3NxdWVlY6H24IAG5ubsjNza1wXo1GA41GI90uKCgAAGi1WunqMnNUWps512gM9aFPK6uqx1RVfml/SmXN+9TnqdKn5tpS2qMxejVnpf2Z82vXGOrDe9QY2Kf507dmhajuWcUAXnrpJbRr1w4ffPAB7O3tcezYMXh5eWHw4MEoKSnBunXrql2wQqFASkoK+vfvX+GYnJwceHl5ITk5GQMHDkRSUhJGjx6tE1wAIDw8HD4+Pli+fHm588TGxmL27NllliclJZUb4oiIiMj83L17F0OGDEF+fj4cHBwqHGfQnp2PPvoIXbt2xaFDh1BUVITp06fjxIkTuHHjBn755ReDi66Ku7s7vLy8cPbsWQCAWq1GUVERbt68qbN3Jy8vD8HBwRXOM3PmTJ1vbi8oKICnpyciIiIqfbJMTavVIjU1FeHh4VAqlaYup9bUhz4jI6ses2lT5feX9vnll+HQamvWZ1XrAvSrubYolVqMGWOcXs1ZaZ/m/No1hvrwHjUG9mn+So/MVMWgsNOqVSscO3YMy5Ytg4WFBe7cuYOBAwdiwoQJcHd3N2RKvVy/fh2XLl2S1hEUFASlUonU1FRERUUBeLj35/jx45g/f36F86hUqnIPtymVynqxoetLnTVlzn0WFVU9Rt/StVoliopq1qc+69Kn5tpmjF7rA3N+7RoT+5SX+tinvvVWO+yUnhD8xRdflHsoqDpu376Nc+fOSbezsrJw9OhRODs7w9nZGbGxsfjnP/8Jd3d3XLhwAW+//TZcXFwwYMAAAICjoyOio6MxdepUNG7cGM7Ozpg2bRoCAwOlq7OIiIjoyVbtsKNUKnH8+HEoFIoar/zQoUMIfeTT10oPLY0cORLLli3DH3/8gdWrV+PWrVtwd3dHaGgo1q5dC3t7e+kxn3zyCSwtLREVFYV79+6he/fuSExMhIWFRY3rIyIiovrPoMNYI0aMQHx8PD788MMarbxr166Vfury9u3bq5zD2toaixcvxuLFi2tUCxEREcmTQWGnqKgIX331FVJTU9G+ffsy34m1cOFCoxRHREREVFPVCjt//fUXmjdvjuPHj6Ndu3YAgDNnzuiMMcbhLSIiIiJjqVbY8fX1RU5ODtLS0gA8/HqIzz77DG5ubrVSHBEREVFNVevrIh4/v2br1q24c+eOUQsiIiIiMiaDvhurlAEfvkxERERUp6oVdhQKRZlzcniODhEREZmzap2zI4TAqFGjpE8fvn//PsaNG1fmaqwNGzYYr0IiIiKiGqhW2Bk5cqTO7WHDhhm1GCIiIiJjq1bYSUhIqK06iIiIiGpFjU5QJiIiIjJ3DDtEREQkaww7REREJGsMO0RERCRrDDtEREQkaww7REREJGvVuvSciMxTaKipKyAiMl/cs0NERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxrBDREREssawQ0RERLLGsENERESyxq+LICIyQ/p8BUhaWu3XQSQH3LNDREREssawQ0RERLLGsENERESyxnN2iIiMLDISKCqq+H6ea0NUt7hnh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI2foEz0GH2+bZqIiOoP7tkhIiIiWWPYISIiIlkzadjZt28f+vTpAw8PDygUCmzcuFHnfiEEYmNj4eHhARsbG3Tt2hUnTpzQGaPRaDBx4kS4uLjAzs4Offv2xeXLl+uwCyIiIjJnJg07d+7cQZs2bbBkyZJy758/fz4WLlyIJUuW4ODBg1Cr1QgPD0dhYaE0JiYmBikpKUhOTsb+/ftx+/ZtREZGori4uK7aICIiIjNm0hOUe/XqhV69epV7nxACixYtwjvvvIOBAwcCAFatWgU3NzckJSVh7NixyM/PR3x8PL7++muEhYUBANasWQNPT0/s3LkTPXr0qLNeiIiIyDyZ7Tk7WVlZyM3NRUREhLRMpVIhJCQE6enpAIDMzExotVqdMR4eHggICJDGEBER0ZPNbC89z83NBQC4ubnpLHdzc0N2drY0xsrKCk5OTmXGlD6+PBqNBhqNRrpdUFAAANBqtdBqtUapvzaU1mbONRqDqfu0sjLOPFWVX9qfUinv7Qn8r0e596pvn/q8tPV5HZrqV4Gp36N1hX2aP31rNtuwU0qhUOjcFkKUWfa4qsbMnTsXs2fPLrN8x44dsLW1NazQOpSammrqEuqEqfocP94482zZot+4MWOejO0JPDm9VtWnPq8NfV6H+r7Gagt/F8lLfezz7t27eo0z27CjVqsBPNx74+7uLi3Py8uT9vao1WoUFRXh5s2bOnt38vLyEBwcXOHcM2fOxJQpU6TbBQUF8PT0REREBBwcHIzditFotVqkpqYiPDwcSqXS1OXUGlP3GRlpnHk2bar8/tI+v/wyHFqtfLcn8HBPx5gx8u9V3z6rem0A+r0O9ZmnNpj6PVpX2Kf5Kz0yUxWzDTve3t5Qq9VITU1F27ZtAQBFRUXYu3cv5s2bBwAICgqCUqlEamoqoqKiAAA5OTk4fvw45s+fX+HcKpUKKpWqzHKlUlkvNnR9qbOmTNVnUZFx5nnkVLJyWVk9/Otdq1WiqEj+2xN4cnqtqk99Xtb6vA5N/WuAv4vkpT72qW+9Jg07t2/fxrlz56TbWVlZOHr0KJydndGsWTPExMQgLi4Ovr6+8PX1RVxcHGxtbTFkyBAAgKOjI6KjozF16lQ0btwYzs7OmDZtGgIDA6Wrs4iIiOjJZtKwc+jQIYQ+8kVEpYeWRo4cicTEREyfPh337t3D+PHjcfPmTXTo0AE7duyAvb299JhPPvkElpaWiIqKwr1799C9e3ckJibCwsKizvshIiIi82PSsNO1a1cIISq8X6FQIDY2FrGxsRWOsba2xuLFi7F48eJaqJCIiIjqO7P9nB0iIiIiY2DYISIiIlkz26uxiIjk6pFTFYmoDnDPDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRo/QZlkQ59PpU1Lq/06iIjIvHDPDh
EREckaww4RERHJGg9jERE94XgImOSOe3aIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYYdoiIiEjWGHaIiIhI1hh2iIiISNYsTV0AUV0KDTV1BUREVNe4Z4eIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjWGHiIiIZI1hh4iIiGSNYYeIiIhkjZ+zQ0RUT+nzuVFpabVfB5G5454dIiIikjWzDjuxsbFQKBQ6P2q1WrpfCIHY2Fh4eHjAxsYGXbt2xYkTJ0xYMREREZkbsw47ANC6dWvk5ORIP3/88Yd03/z587Fw4UIsWbIEBw8ehFqtRnh4OAoLC01YMREREZkTsw87lpaWUKvV0k+TJk0APNyrs2jRIrzzzjsYOHAgAgICsGrVKty9exdJSUkmrpqIiIjMhdmHnbNnz8LDwwPe3t4YPHgw/vrrLwBAVlYWcnNzERERIY1VqVQICQlBenq6qcolIiIiM2PWV2N16NABq1evhp+fH65evYo5c+YgODgYJ06cQG5uLgDAzc1N5zFubm7Izs6udF6NRgONRiPdLigoAABotVpotVojd2E8pbWZc43GYGifVla1UU3tUSq1Ov+VsyelV3PsU5+3kT7vnUfn4e8ieanPfepbs0IIIWq5FqO5c+cOfHx8MH36dHTs2BGdO3fG33//DXd3d2nMmDFjcOnSJWzbtq3CeWJjYzF79uwyy5OSkmBra1srtRMREZFx3b17F0OGDEF+fj4cHBwqHGfWe3YeZ2dnh8DAQJw9exb9+/cHAOTm5uqEnby8vDJ7ex43c+ZMTJkyRbpdUFAAT09PREREVPpkmZpWq0VqairCw8OhVCpNXU6tMbTPyMhaLKoWKJVajBmTii+/DIdWK9/tCTw5vZpjn5s2VT1Gn/fOo/Pwd5G81Oc+S4/MVKVehR2NRoNTp07hxRdfhLe3N9RqNVJTU9G2bVsAQFFREfbu3Yt58+ZVOo9KpYJKpSqzXKlU1osNXV/qrKnq9llUVIvF1CKtVomiIvlvT+DJ6dWc+tTnLaTPe6e8efi7SF7qY5/61mvWYWfatGno06cPmjVrhry8PMyZMwcFBQUYOXIkFAoFYmJiEBcXB19fX/j6+iIuLg62trYYMmSIqUsnIiIiM2HWYefy5ct49dVXce3aNTRp0gQdO3bEr7/+Ci8vLwDA9OnTce/ePYwfPx43b95Ehw4dsGPHDtjb25u4ciIiIjIXZh12kpOTK71foVAgNjYWsbGxdVMQEVE9o8/3ZxHJndl/zg4RERFRTTDsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsmfUnKBOV4qfAEhGRobhnh4iIiGSNYYeIiIhkjYexiIjIKPQ53JyWVvt1ED2Oe3aIiIhI1hh2iIiISNZ4GIsMxl3WRERUH3DPDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRrDDhEREckaww4RERHJGj9UkGoVP3iQSB4efS9bWQHjxwORkUBRkelqItIX9+wQERGRrHHPDhER1Rnu7SVTYNghk3v8lx93kRMRkTHxMBYRERHJGsMOERERyRrDDhEREckaww4RERHJGsMOERERyRqvxqJy6XN5KBGRqfASdqoO7tkhIiIiWeOeHZnhXztERES6uGeHiIiIZI1hh4iIiGSNYYeIiIhkjefsPIF4pRURmTP+jiJjY9gxA/q+sfkFmURERNXHsENERE+syv7YLP0Dk+o/hh0iIqI6wI8GMR3ZnKC8dOlSeHt7w9raGkFBQfj5559NXRIRERGZAVns2Vm7di1iYmKwdOlSdO7cGV988QV69eqFkydPolmzZiatjSfaERGZhrF+/+pzniT3yJg3WezZWbhwIaKjo/H666+jZcuWWLRoETw9PbFs2TJTl0ZEREQmVu/DTlFRETIzMxEREaGzPCIiAunp6SaqioiIiMxFvT+Mde3aNRQXF8PNzU1nuZubG3Jzc8t9jEajgUajkW7n5+cDAG7cuAGtVmvU+hoYNU5qcffuXQDX0aCB0pgTmxn2KT9PSq/sU1707/P69apn0+ffg+7dqx7z3XdVj6kOrfZhn9evX4dSWf3tGRVV9Rhj11yqsLAQACCEqHygqOeuXLkiAIj09HSd5XPmzBEtWrQo9zGzZs0SAPjDH/7whz/84Y8Mfi5dulRpVqj3e3ZcXFxgYWFRZi9OXl5emb09pWbOnIkpU6ZIt0tKSnDjxg00btwYCoWiVuutiYKCAnh6euLSpUtwcHAwdTm1hn3Kz5PSK/uUF/Zp/oQQKCwshIeHR6Xj6n3YsbKyQlBQEFJTUzFgwABpeWpqKvr161fuY1QqFVQqlc6yRo0a1WaZRuXg4FDvXpCGYJ/y86T0yj7lhX2aN0dHxyrH1PuwAwBTpkzB8OHD0b59e3Tq1AkrVqzAxYsXMW7cOFOXRkRERCYmi7AzaNAgXL9+He+//z5ycnIQEBCALVu2wMvLy9SlERERkYnJIuwAwPjx4zFe5l9iolKpMGvWrDKH4OSGfcrPk9Ir+5QX9ikfCiGqul6LiIiIqP6q9x8qSERERFQZhh0iIiKSNYYdIiIikjWGHSIiIpI1hh0zcvPmTQwfPhyOjo5wdHTE8OHDcevWrUofo1Aoyv356KOPpDFdu3Ytc//gwYNruZuKGdLnqFGjyvTQsWNHnTEajQYTJ06Ei4sL7Ozs0LdvX1y+fLkWO6ladXvVarV46623EBgYCDs7O3h4eGDEiBH4+++/dcaZepsuXboU3t7esLa2RlBQEH7++edKx+/duxdBQUGwtrbGP/7xDyxfvrzMmPXr16NVq1ZQqVRo1aoVUlJSaqt8vVWnzw0bNiA8PBxNmjSBg4MDOnXqhO3bt+uMSUxMLPf9ev/+/dpupVLV6XPPnj3l9vDnn3/qjDPH7QlUr9fyfu8oFAq0bt1aGmOO23Tfvn3o06cPPDw8oFAosHHjxiofU1/fo3ozzjdUkTH07NlTBAQEiPT0dJGeni4CAgJEZGRkpY/JycnR+Vm5cqVQKBTi/Pnz0piQkBAxZswYnXG3bt2q7XYqZEifI0eOFD179tTp4fr16zpjxo0bJ5566imRmpoqDh8+LEJDQ0WbNm3EgwcParOdSlW311u3bomwsDCxdu1a8eeff4qMjAzRoUMHERQUpDPOlNs0OTlZKJVK8eWXX4qTJ0+KSZMmCTs7O5GdnV3u+L/++kvY2tqKSZMmiZMnT4ovv/xSKJVKsW7dOmlMenq6sLCwEHFxceLUqVMiLi5OWFpail9//bVOeipPdfucNGmSmDdvnjhw4IA4c+aMmDlzplAqleLw4cPSmISEBOHg4FDmfWtK1e0zLS1NABCnT5/W6eHR95k5bk8hqt/rrVu3dHq8dOmScHZ2FrNmzZLGmOM23bJli3jnnXfE+
vXrBQCRkpJS6fj6+h6tDoYdM3Hy5EkBQOeFk5GRIQCIP//8U+95+vXrJ7p166azLCQkREyaNMlYpdaIoX2OHDlS9OvXr8L7b926JZRKpUhOTpaWXblyRTRo0EBs27bNKLVXl7G26YEDBwQAnV/Iptymzz//vBg3bpzOMn9/fzFjxoxyx0+fPl34+/vrLBs7dqzo2LGjdDsqKkr07NlTZ0yPHj3E4MGDjVR19VW3z/K0atVKzJ49W7qdkJAgHB0djVWiUVS3z9Kwc/PmzQrnNMftKUTNt2lKSopQKBTiwoUL0jJz3KaP0ifs1Nf3aHXwMJaZyMjIgKOjIzp06CAt69ixIxwdHZGenq7XHFevXsXmzZsRHR1d5r5vvvkGLi4uaN26NaZNm4bCwkKj1V4dNelzz549cHV1hZ+fH8aMGYO8vDzpvszMTGi1WkREREjLPDw8EBAQoPfzZ2zG2KYAkJ+fD4VCUeb720yxTYuKipCZmanzPANAREREhT1lZGSUGd+jRw8cOnQIWq220jGm2naG9Pm4kpISFBYWwtnZWWf57du34eXlhaZNmyIyMhJHjhwxWt3VVZM+27ZtC3d3d3Tv3h1paWk695nb9gSMs03j4+MRFhZW5tP5zWmbGqI+vkerSzafoFzf5ebmwtXVtcxyV1fXMt/oXpFVq1bB3t4eAwcO1Fk+dOhQeHt7Q61W4/jx45g5cyZ+//13pKamGqX26jC0z169euGVV16Bl5cXsrKy8N5776Fbt27IzMyESqVCbm4urKys4OTkpPM4Nzc3vZ8/YzPGNr1//z5mzJiBIUOG6HxBn6m26bVr11BcXAw3Nzed5ZU9z7m5ueWOf/DgAa5duwZ3d/cKx5hq2xnS5+MWLFiAO3fuICoqSlrm7++PxMREBAYGoqCgAJ9++ik6d+6M33//Hb6+vkbtQR+G9Onu7o4VK1YgKCgIGo0GX3/9Nbp37449e/agS5cuACre5qbankDNt2lOTg62bt2KpKQkneXmtk0NUR/fo9XFsFPLYmNjMXv27ErHHDx4EMDDk40fJ4Qod3l5Vq5ciaFDh8La2lpn+ZgxY6T/DwgIgK+vL9q3b4/Dhw+jXbt2es1dldruc9CgQdL/BwQEoH379vDy8sLmzZvLhLvqzGuIutqmWq0WgwcPRklJCZYuXapzX11s08o8Xn9VPZU3/vHl1Z2zLhha07fffovY2Fj88MMPOoG3Y8eOOifWd+7cGe3atcPixYvx2WefGa/waqpOny1atECLFi2k2506dcKlS5fw8ccfS2GnunPWJUPrSkxMRKNGjdC/f3+d5ea6Taurvr5H9cWwU8veeOONKq+Sad68OY4dO4arV6+Wue+///1vmTRdnp9//hmnT5/G2rVrqxzbrl07KJVKnD171mj/MNZVn6Xc3d3h5eWFs2fPAgDUajWKiopw8+ZNnb07eXl5CA4O1ntefdRFr1qtFlFRUcjKysLu3bt19uqUpza2aXlcXFxgYWFR5q+5vLy8CntSq9Xljre0tETjxo0rHVOd14QxGdJnqbVr1yI6Ohrff/89wsLCKh3boEEDPPfcc9LruK7VpM9HdezYEWvWrJFum9v2BGrWqxACK1euxPDhw2FlZVXpWFNvU0PUx/dodfGcnVrm4uICf3//Sn+sra3RqVMn5Ofn48CBA9Jjf/vtN+Tn5+v1j3V8fDyCgoLQpk2bKseeOHECWq0W7u7uNertUXXVZ6nr16/j0qVLUg9BQUFQKpU6h3FycnJw/Phxo4ed2u61NOicPXsWO3fulH7ZVKY2tml5rKysEBQUVOZwWWpqaoU9derUqcz4HTt2oH379lAqlZWOMfa205chfQIP9+iMGjUKSUlJeOmll6pcjxACR48erfXtVhFD+3zckSNHdHowt+0J1KzXvXv34ty5c+WeD/k4U29TQ9TH92i11f050VSRnj17imeeeUZkZGSIjIwMERgYWOYy5RYtWogNGzboLMvPzxe2trZi2bJlZeY8d+6cmD17tjh48KDIysoSmzdvFv7+/qJt27YmuyS7un0WFhaKqVOnivT0dJGVlSXS0tJEp06dxFNPPSUKCgqkx4wbN040bdpU7Ny5Uxw+fFh069bNLC49r06vWq1W9O3bVzRt2lQcPXpU51JWjUYjhDD9Ni29fDc+Pl6cPHlSxMTECDs7O+kKlRkzZojhw4dL40sva508ebI4efKkiI+PL3NZ6y+//CIsLCzEhx9+KE6dOiU+/PBDk1/WWt0+k5KShKWlpfj8888r/EiA2NhYsW3bNnH+/Hlx5MgRMXr0aGFpaSl+++23Ou+vVHX7/OSTT0RKSoo4c+aMOH78uJgxY4YAINavXy+NMcftKUT1ey01bNgw0aFDh3LnNMdtWlhYKI4cOSKOHDkiAIiFCxeKI0eOSFd0yuU9Wh0MO2bk+vXrYujQocLe3l7Y29uLoUOHlrm8E4BISEjQWfbFF18IGxubcj9n5eLFi6JLly7C2dlZWFlZCR8fH/Hmm2+W+YyaulTdPu/evSsiIiJEkyZNhFKpFM2aNRMjR44UFy9e1HnMvXv3xBtvvCGcnZ2FjY2NiIyMLDOmrlW316ysLAGg3J+0tDQhhHls088//1x4eXkJKysr0a5dO7F3717pvpEjR4qQkBCd8Xv27BFt27YVVlZWonnz5uUG8++//160aNFCKJVK4e/vr/OPp6lUp8+QkJByt9vIkSOlMTExMaJZs2bCyspKNGnSRERERIj09PQ67Kh81elz3rx5wsfHR1hbWwsnJyfxwgsviM2bN5eZ0xy3pxDVf+3eunVL2NjYiBUrVpQ7nzlu09KPB6jotSin96i+FEL8/7OQiIiIiGSI5+wQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEBERkawx7BAREZGsMewQERGRrDHsEJFJKRQKbNy4Ubr9559/omPHjrC2tsazzz5b4TIiMo19+/ahT58+8PDwKPP+rS1XrlzBsGHD0LhxY9ja2uLZZ59FZmam3o9n2CGiWjFq1CgoFAooFAoolUq4ubkhPDwcK1euRElJiTQuJycHvXr1km7PmjULdnZ2OH36NHbt2lXhMiIyjTt37qBNmzZYsmRJnazv5s2b6Ny5M5RKJbZu3YqTJ09iwYIFaNSokd5z8FvPiajW9OzZEwkJCSguLsbVq1exbds2TJo0CevWrcOPP/4IS0tLqNVqncecP38eL730Ery8vCpdVl1FRUVVfmM1EVWtV69eOn+gPK6oqAjvvvsuvvnmG9y6dQsBAQGYN28eunbtatD65s2bB09PTyQkJEjLmjdvXq05uGeHiGqNSqWCWq3GU089hXbt2uHtt9/GDz/8gK1btyIxMRGA7mEshUKBzMxMvP/++1AoFIiNjS13GfBwt/agQYPg5OSExo0bo1+/frhw4YK07lGjRqF///6YO3cuPDw84OfnV63Hffzxx3B3d0fjxo0xYcIEaLVaaYxGo8H06dPh6ekJlUoFX19fxMfHS/efPHkSvXv3RsOGDeHm5obhw4fj2rVrtfIcE5mb0aNH
45dffkFycjKOHTuGV155BT179sTZs2cNmu/HH39E+/bt8corr8DV1RVt27bFl19+Wa05GHaIqE5169YNbdq0wYYNG8rcl5OTg9atW2Pq1KnIycnBtGnTyl129+5dhIaGomHDhti3bx/279+Phg0bomfPnigqKpLm27VrF06dOoXU1FRs2rRJ78elpaXh/PnzSEtLw6pVq5CYmCiFMwAYMWIEkpOT8dlnn+HUqVNYvnw5GjZsKPUQEhKCZ599FocOHcK2bdtw9epVREVF1d6TSmQmzp8/j2+//Rbff/89XnzxRfj4+GDatGl44YUXdPbMVMdff/2FZcuWwdfXF9u3b8e4cePw5ptvYvXq1XrPwcNYRFTn/P39cezYsTLL1Wo1LC0t0bBhQ+nwVsOGDcssW7lyJRo0aICvvvoKCoUCAJCQkIBGjRphz549iIiIAADY2dnhq6++kg5f6fs4JycnLFmyBBYWFvD398dLL72EXbt2YcyYMThz5gy+++47pKamIiwsDADwj3/8Q+ph2bJlaNeuHeLi4qRlK1euhKenJ86cOSPtYSKSo8OHD0MIUeZ1rtFo0LhxYwDAhQsX4O3tXek8EyZMkM4JKikpQfv27aX3VNu2bXHixAksW7YMI0aM0Ksuhh0iqnNCCClsGCIzMxPnzp2Dvb29zvL79+/j/Pnz0u3AwECd83T0fVzr1q1hYWEh3XZ3d8cff/wBADh69CgsLCwQEhJSYW1paWnSnp5HnT9/nmGHZK2kpAQWFhbIzMzUeQ8BkN4TTz31FE6dOlXpPE5OTtL/u7u7o1WrVjr3t2zZEuvXr9e7LoYdIqpzp06dqvIvu8qUlJQgKCgI33zzTZn7mjRpIv2/nZ2dQY9TKpU69ykUCukKMhsbmypr69OnD+bNm1fmPnd390ofS1TftW3bFsXFxcjLy8OLL75Y7hilUgl/f3+95+zcuTNOnz6ts+zMmTPVumCBYYeI6tTu3bvxxx9/YPLkyQbP0a5dO6xduxaurq5wcHCo9cc9KjAwECUlJdi7d690GOvxdaxfvx7NmzeHpSV/xZL83L59G+fOnZNuZ2Vl4ejRo3B2doafnx+GDh2KESNGYMGCBWjbti2uXbuG3bt3IzAwEL179672+iZPnozg4GDExcUhKioKBw4cwIoVK7BixQq95+AJykRUazQaDXJzc3HlyhUcPnwYcXFx6NevHyIjI/U+1l6eoUOHwsXFBf369cPPP/+MrKws7N27F5MmTcLly5eN/rhHNW/eHCNHjsRrr72GjRs3IisrC3v27MF3330H4OG5Bjdu3MCrr76KAwcO4K+//sKOHTvw2muvobi42OCeiczFoUOH0LZtW7Rt2xYAMGXKFLRt2xb//ve/ATw8D27EiBGYOnUqWrRogb59++K3336Dp6enQet77rnnkJKSgm+//RYBAQH44IMPsGjRIgwdOlTvOfhnBxHVmm3btsHd3R2WlpZwcnJCmzZt8Nlnn2HkyJFo0MDwv7VsbW2xb98+vPXWWxg4cCAKCwvx1FNPoXv37pXusTH0cY9btmwZ3n77bYwfPx7Xr19Hs2bN8PbbbwMAPDw88Msvv+Ctt95Cjx49oNFo4OXlhZ49e9aoZyJz0bVrVwghKrxfqVRi9uzZmD17ttHWGRkZicjISIMfrxCVVUxERERUz/HPDCIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikjWGHSIiIpI1hh0iIiKSNYYdIiIikrX/BzG+SOTUJuwaAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "differences = hf_model.model.layers[0].block_sparse_moe(test_tensor)[0] - tl_model.blocks[0].mlp(test_tensor)\n", + "\n", + "# Flatten the differences to create a one-dimensional tensor\n", + "flattened_differences = differences.flatten().cpu().detach().numpy()\n", + "\n", + "# Plot the histogram of the differences\n", + "plt.hist(flattened_differences, bins=50, alpha=0.75, color='blue')\n", + "plt.title('Differences Between MLP Outputs')\n", + "plt.xlabel('Difference')\n", + "plt.ylabel('Frequency')\n", + "plt.grid(True)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "id": "ac306e1c-9972-466a-8f4a-f3eb56042f53", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0.46239426732063293" + ] + }, + "execution_count": 35, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "hf_model.model.layers[0].block_sparse_moe(test_tensor)[0][0, 0, 0].item()" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "e9481397-6e87-435a-a0cf-ef409630d17c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0.4623942971229553" + ] + }, + "execution_count": 36, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tl_model.blocks[0].mlp(test_tensor)[0, 0, 0].item()" + ] + }, + { + "cell_type": "markdown", + "id": "8176dc01-375b-4b48-b9f0-10efc4548eaf", + "metadata": {}, + "source": [ + "## Compare Attention Outputs" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "id": "5172efa2-0066-4ae0-a6a2-530d815b053b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[-0.3395, 0.2164, -0.0300, ..., 0.1450, 0.0525, -0.1044]]],\n", + " grad_fn=)" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tl_model.blocks[0].attn.forward(test_tensor, test_tensor, test_tensor)" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "92781a06-e16d-43f9-be4c-3ef04b3d4b08", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[-0.3395, 0.2164, -0.0300, ..., 0.1450, 0.0525, -0.1044]]],\n", + " grad_fn=)" + ] + }, + "execution_count": 38, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "hf_model.model.layers[0].self_attn.forward(test_tensor)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "943cd506-2bb8-45bf-afc7-7f6b4f8043f1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[[False, False, False, ..., False, False, False]]])" + ] + }, + "execution_count": 39, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "(tl_model.blocks[0].attn.forward(test_tensor, test_tensor, test_tensor) == \n", + " hf_model.model.layers[0].self_attn.forward(test_tensor)[0])" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "57ffc181-abed-4784-86eb-6e6b4f174bc5", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor(236)" + ] + }, + "execution_count": 40, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.sum(tl_model.blocks[0].attn.forward(test_tensor, test_tensor, test_tensor) == \n", + " hf_model.model.layers[0].self_attn.forward(test_tensor)[0])" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "7427fd15-3029-45c3-9d12-64c80f1048f1", + "metadata": { + "jupyter": { + "source_hidden": true + } + 
}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjsAAAHFCAYAAAAUpjivAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAABJnUlEQVR4nO3de1xU5d7///cIw1E8gYIkIhlqJqVpWWapKXjCSis7WGqa27Zmknrbbd7fHbYN09Js484O21N56myHuxTMQ5l2a5SZ1tYyjwmRSuIRRrh+f/hjdiMgMAMOLF/Px4OHrmuutdZnXTPAm2vWmmUzxhgBAABYVC1vFwAAAFCVCDsAAMDSCDsAAMDSCDsAAMDSCDsAAMDSCDsAAMDSCDsAAMDSCDsAAMDSCDsAAMDSCDuoMgsXLpTNZnN+BQQEKCIiQt26ddO0adOUnZ1dbJ3k5GTZbDaXtvz8fD3yyCNq3LixfHx81LZtW0nS0aNHde+996pRo0ay2Wy64447LsJR1Vx79+51eT5sNpvq1Kmja665RrNnz1ZBQYFb2/3kk0+UnJxcucVWU4cPH5a/v79sNpu+/vrrEvukpKRoxYoVxdp/+OEHJScna+/evVVbZBl1rFu3TjabTevWrbsodZzPGKOlS5fq1ltvVf369eXv76/LL79co0eP1oEDB9ze7qFDh5ScnKytW7dWXrEXsHHjRiUnJ+uPP/64KPuDhwxQRRYsWGAkmQULFphNmzaZzz//3LzzzjsmKSnJ1K1b1zRo0MCkp6e7rHPgwAGzadMml7bZs2cbSSY1NdVs3LjRbNu2zRhjTFJSkvHz8zOLFy82mzZtMjt37rxox1YT7dmzx0gyY8aMMZs2bTKbNm0yn376qfnrX/9qJJlx48a5td3Ro0ebS+VHyaxZs4wkI8k88sgjJfYJDg42Q4YMKdb+9ttvG0lm7dq1VVtkGXUcO3bMbNq0yRw7duyi1PFnBQUF5p577jGSzH333WdWrFhh1q5da1588UXTpEkTU69ePbNhwwa3tr1lyxbnz5uL4bnnnjOSzJ49ey7K/uAZXy9lLFxC2rRpow4dOjiX77zzTj3++OPq3LmzBgwYoJ9++knh4eGSpCZNmqhJkyYu62/fvl2BgYF69NFHi7U3b95cgwYNqrRaT58+rcDAwErbXnXUtGlT3XDDDc7lXr16afv27Vq2bJlmzpzpxcqqv/nz56tRo0aKjo7WsmXLNGvWrBr3eqlTp47L838xTZ8+XW+++aaeffZZPfHEE872rl276p577lHHjh1155136t///rfq1avnlRphUd5OW7CuopmdLVu2lPj4W2+9ZSSZKVOmONueeuopl1kC/f9/Rf/5q2i7538V/cWcl5dn/v73v5uWLVsaPz8/ExYWZoYOHWqys7Nd9h8dHW369u1r3n33XdO2bVvj7+9vnnjiCWOMMZmZmeYvf/mLueyyy4zdbjfNmjUzycnJxuFwONcvmil57rnnzMyZM02zZs1McHCwueGGG4rNThljzFdffWUSExNNgwYNjL+/v7n88svN2LFjXfrs2rXL3HfffaZhw4bGz8/PtGrVysyZM8elT0FBgfn73/9uWrRoYQICAkzdunVNXFycmT179gWfjz/Xe77ExETTtGnTYu3Lly83N9xwgwkKCjLBwcEmISHBfPPNN87HhwwZUuJzsWfPHnPXXXeZ1q1bF9uPJPPWW2852zIyMowk8+GHHzrbyjP+xlT8uf70009Nu3btTEBAgGnZsqWZN2/eBcfsz7766isjyYwfP968+uqrRpJ54403XPqUNBZdunQp9TX751mI9PR0c+utt5qQkBATGBhoOnXqZFavXu2y/aLvj+3bt5t7773X1KlTxzRq1Mg89NBD5o8//iizDmOMWbt2bYkzTB988IG54YYbTGBgoKldu7bp0aOH2bhxo1v7L0leXp6pX7++ufLKK01hYWGJfZYuXWokmeeff97ZFh0dXeIMVZcuXYod0/lfTz31lDHm3Os0ODjYbN++3dx6660mKCjIhIWFmdGjR5uTJ086t1n0PVLS7NCft1c0DqX9DPrss89Mly5dTIMGDUxAQICJiooyAwYMcNkXLi7CDqpMWWHnxIkTxsfHx3Tv3t3Zdn7Y2bRpk+nTp48JDAx0vvWSlZVlNm3aZNq1a2cuv/xyZ/uxY8dMQUGB6dWrlwkODjZTpkwx6enp5l//+pe57LLLTOvWrc2pU6ec246OjjaNGzc2l19+uZk/f75Zu3at2bx5s8nMzDRRUVEmOjravPLKK2b16tXm73//u/H39zdDhw51rl/0g7FZs2amV69eZsWKFWbFihUmLi7O1K9f3+WH/8qVK43dbjdXX321WbhwoVmzZo2ZP3++uffee519duzY4Qwur7/+uklLSzPjx483tWrVMsnJyc5+06ZNMz4+Puapp54yn332mVm5cqWZPXu2S5+SFNU7ffp043A4jMPhMIcPHzbz5s0zvr6+ZvLkyS79n3nmGWOz2cywYcPMxx9/bN577z1z4403muDgYLNjxw5jjDE///yzueuuu4wk5/OwadMmc+bMGfPyyy8bSebQoUPGGGMcDofzF/mIESOc+5k+fbrx9fU1ubm5xhhT7vGv6HPdpEkT07p1a/P666+bVatWmbvvvttIMuvXr7/guBUZMWKEkWR27NhhcnNzTVBQkOnatatLn02bNpnAwEDTp08f51js2LHDZGdnm5SUFCPJ/POf/3Q+VhTK3njjDWOz2cwdd9xh3nvvPfPRRx+ZxMRE4+Pj4xJ4ir4/WrZsaf72t7+Z9PR0M2vWLOPv728eeuihMuswpuSws2TJEiPJJCQkmBUrVpg333zTtG/f3vj5+ZkvvviiwvsvycaNG40k5x8UJTl+/LipVauW6dmzp7OtPGHn2LFjzp83//M//+M85gMHDhhjzoUdPz8/07RpU/PMM8+YtLQ0k5ycbHx9fU1iYqJzm+UNOwcOHDBjxowxksx7773n8jNoz549JiAgwMTHx5sVK1aYdevWmSVLlpgHH3zQ5OTkXHCMUHUIO6gyZYUdY4wJDw83V155pXP5/LBjzH/+Kjtfly5dzFVXXeXStmzZMiPJvPvuuy7tRe/nv/TSS8626Oho4+PjU+xcn5EjR5ratWubffv2ubQ///zzzl92xvznB2NcXJw5e/ass9/mzZuNJLNs2TJnW/PmzU3z5s3N6dOnSx2Lnj17miZNmhQ7l+LRRx81AQEB5ujRo8aYc7Mjbdu2LXU7pSmqt6SvoUOHuhzD/v37ja+vrxkzZozLNo4fP24iIiLMwIEDnW2lnbPz888/G0nm9ddfN8YYs2HDBiPJTJw40cTExDj7xcfHm06dOjmXyzv+FX2uAwICXLZ5+vRp06BBAzNy5Mgyx+7kyZOmTp065oYbbnC2DRkyxNhsNvPzzz+79K3oOTsnT540DRo0MP369XNpLygoMNdcc425/vrrnW1F3x8zZsxw6Ttq1CgTEBDgM
mNSWh3nh52CggITGRlp4uLiTEFBgbPf8ePHTaNGjVyem4rs/3zLly83kszLL79cah9jiv9MKE/YMebC5+wUzUC++OKLLu3PPPOMkeQ8T6i8YceY0s/Zeeedd4wks3Xr1gseJy4ursaCVxljKnV7H3/8serVq6d+/frp7Nmzzq+2bdsqIiKi2BUoV199tVq0aFFsG926dVNkZKTLNnr37i1JWr9+vUv/vn37ysfHx2WbkrRv3z5J0q5du7R7924NHz5cAQEBJdZ95swZffbZZ+rfv7+CgoJc9tunTx+dOXNGX331lSTp+uuv13fffadRo0Zp1apVys3NrdAYjR07Vlu2bNGWLVu0du1apaSk6K233tJ9993n7LNq1SqdPXtWgwcPdqklICBAXbp0KdeVPM2bN1ezZs20evVqSVJ6erri4uL0wAMPaM+ePdq9e7fy8vK0YcMG9ejRw7leece/os9127Zt1bRpU+dyQECAWrRo4XyeLuStt95Sbm6uhg0b5mwbNmyYjDFasGBBmetfyMaNG3X06FENGTLE5TgKCwvVq1cvbdmyRSdPnnRZ57bbbnNZvvrqq3XmzJkSr3Asy86dO3Xo0CE9+OCDqlXrP78SateurTvvvFNfffWVTp06VWX7P58xptgVmZXl/PP77r//fknS2rVrK20fbdu2lZ+fn/7yl79o0aJF+uWXXypt23AfJyjDa06ePKkjR44oLi6u0rb522+/6Y8//pCfn1+Jjx8+fNhluXHjxiVu46OPPpLdbi/XNkJDQ12W/f39JZ072VmSfv/9d0kqduL1nx05ckRnz55VamqqUlNTL7jfSZMmKTg4WIsXL9bLL78sHx8f3XLLLZo+fbrLieCladKkiUu/rl27ymazadKkSVq1apV69uyp3377TZJ03XXXlbiNP/9SvJDu3btr5cqVkqTVq1crPj5ecXFxCg8P1+rVqxUbG6vTp0+7hJ3yjn9Fn+vznyfp3HNV9DxdyLx58xQQEKBevXo5LzW++uqr1axZMy1cuFBTpkxxCbwVUTTWd911V6l9jh49quDgYOdyWa+5ijhy5Iikkr8XIiMjVVhYqJycHAUFBXm0/6KguWfPnlL7nDx5UocPH1a7du3KfwDl5OvrW6zuiIgISf8Zg8rQvHlzrV69WjNmzNDo0aN18uRJXX755Xrsscc0duzYStsPKoawA6/53//9XxUUFKhr166Vts2wsDCFhoY6f8GeLyQkxGW5pL8gw8LCdPXVV+uZZ54pcRuRkZEVqqlhw4aSpIMHD5bap379+vLx8dGDDz6o0aNHl9gnJiZG0rkf2uPGjdO4ceP0xx9/aPXq1XryySfVs2dPHThwwOWXUnkVzUZ999136tmzp8LCwiRJ77zzjqKjoyu8vSLdu3fXvHnztHnzZv3f//2f/ud//keSdOuttyo9PV379u1T7dq1Xa4OKu/4V/S5dteuXbu0YcMGSXKZGfqzVatWqU+fPm5tv2isU1NTS71KquhqxapQFAAyMzOLPXbo0CHVqlVL9evX93g/7du3V/369fXhhx9q2rRpJX7vffjhhyosLFR8fLyzLSAgQHl5ecX6Hj582Dl25XH27FkdOXLEJfBkZWVJ+s8YFM28nr+/ioahm2++WTfffLMKCgr09ddfKzU1VUlJSQoPD9e9995boW2hchB24BX79+/XhAkTVLduXY0cObLStpuYmKjly5eroKBAHTt2dHsbn3zyiZo3b14pP+RbtGih5s2ba/78+Ro3bpzzr+A/CwoKUrdu3fTtt9/q6quvLnW24nz16tXTXXfdpV9//VVJSUnau3evWrduXeEaiz6IrVGjRpKknj17ytfXV7t379add955wXX//Ff9+Zdhd+/eXTabTf/v//0/1apVS7fccoskqUePHvqv//ov7du3T7fccovLLE55x78ynuvymDdvniTptdde0xVXXOHy2OnTp3X77bdr/vz5zrBT2mxRabMfN910k+rVq6cffvih2McreKK8s1YtW7bUZZddpqVLl2rChAnOEHLy5Em9++67uvHGG90K0Ofz8/PTf/3Xf+nJJ5/Uc889p4kTJ7o8np2drUmTJik8PFwPP/yws71Zs2batm2bS99du3Zp586dLmGnPLNLS5Ys0WOPPeZcXrp0qSQ5/+AKDw9XQEBAsf198MEHxbZVnv35+PioY8eOatWqlZYsWaJvvvmGsOMlhB1Uue3btzvPQ8jOztYXX3yhBQsWyMfHR++//75z5qMy3HvvvVqyZIn69OmjsWPH6vrrr5fdbtfBgwe1du1a3X777erfv/8Ft/H0008rPT1dnTp10mOPPaaWLVvqzJkz2rt3rz755BO9/PLLF3xLqiT//Oc/1a9fP91www16/PHH1bRpU+3fv1+rVq3SkiVLJEkvvviiOnfurJtvvll//etf1axZMx0/flw///yzPvroI61Zs0aS1K9fP+dnFzVs2FD79u3T7NmzFR0drdjY2DJr2b9/v/P8n5MnT2rTpk2aNm2aoqOjNWDAAEnnfsE8/fTTmjx5sn755Rf16tVL9evX12+//abNmzcrODhYU6ZMkSTn25DTp09X79695ePj4wxsjRo1Ups2bZSWlqZu3bo5f2n26NFDR48e1dGjRzVr1iy3xr8ynuuynD17Vq+//rquvPJKl1/Af9avXz99+OGH+v3339WwYUPFxcVp3bp1+uijj9S4cWOFhISoZcuWatOmjSTp1VdfVUhIiAICAhQTE6PQ0FClpqZqyJAhOnr0qO666y41atRIv//+u7777jv9/vvvmjt3boVrL62O89WqVUszZszQoEGDlJiYqJEjRyovL0/PPfec/vjjDz377LMV3ndpnnjiCX333XfOf++55x7VrVtX27Zt03PPPafjx4/r448/Vt26dZ3rPPjgg3rggQc0atQo3Xnnndq3b59mzJhR7OdG8+bNFRgYqCVLlujKK69U7dq1FRkZ6ZwJ9PPz08yZM3XixAldd9112rhxo6ZOnarevXurc+fOks7N9D7wwAOaP3++mjdvrmuuuUabN292hqLzx1c69307ZMgQ2e12tWzZUkuWLNGaNWvUt29fNW3aVGfOnNH8+fMlyeXtWlxkXj5BGhZ2/meL+Pn5mUaNGpkuXbqYlJSUYp+FYoznV2MZc+4S5+eff95cc801JiAgwNSuXdu0atXKjBw50vz000/OfkWfvVKS33//3Tz22GMmJibG2O1206BBA9O+fXszefJkc+LECWPMhT+3RudduWHMucuBe/fuberWrWv8/f1N8+bNzeOPP+7SZ8+ePWbYsGHOz5dp2LCh6dSpk5k6daqzz8yZM02nTp1MWFiY83La4cOHm71795Z4LH/ets67CisgIMC0aNHCJCUlmczMzGLrrFixwnTr1s3UqVPH+Pv7m+joaHPXXXe5XA6dl5dnHn74YdOwYUNjs9mKXaHy+OOPG0nmmWeecdl2bGyskeT8ROw/K8/4G+P5c33+FT0lHb+kC36G0cqVK40kM3PmTGOMMVu3bjU33XSTCQoKcvl8G2POfRp4
TEyM8fHxKXbVz/r1603fvn1NgwYNjN1uN5dddpnp27evefvtt519ir4/fv/9d5cair7X/jzupdVR2ufsrFixwnTs2NEEBASY4OBg0717d/Pll1+69KnI/ktTWFholixZYrp27Wrq1atn/Pz8TExMjPnrX/9a7Aq8ov4zZswwl19+uQkICDAdOnQwa9asKfG5W7ZsmWnVqpWx2+0lfs7Otm3bTNeuXU1gYKBp0KCB+etf/+ryejLm3GXsDz/8sAkPDzfBwcGmX79+Zu/evSV+T0+aNMlERkaaWrVqOcd006ZNpn///iY6Otr4+/ub0NBQ06VLF5fPkcLFZzOmki+HAQCgGhk6dKjeeecdnThxwtulwEu49BwAAFgaYQcAAFgab2MBAABLY2YHAABYGmEHAABYGmEHAABYGh8qKKmwsFCHDh1SSEhIld2ADgAAVC5jjI4fP67IyMgL3rOPsKNz93+JiorydhkAAMANBw4cuOAn2xN29J8bBh44cEB16tRxaxsOh0NpaWlKSEgo9W7NuDDG0HOMoWcYP88xhp5jDMsvNzdXUVFRZd74l7Cj/9z5uk6dOh6FnaCgINWpU4cXp5sYQ88xhp5h/DzHGHqOMay4sk5B4QRlAABgaYQdAABgaYQdAABgaYQdAABgaV4PO7/++qseeOABhYaGKigoSG3btlVGRobzcWOMkpOTFRkZqcDAQHXt2lU7duxw2UZeXp7GjBmjsLAwBQcH67bbbtPBgwcv9qEAAIBqyKthJycnRzfddJPsdrs+/fRT/fDDD5o5c6bq1avn7DNjxgzNmjVLc+bM0ZYtWxQREaH4+HgdP37c2ScpKUnvv/++li9frg0bNujEiRNKTExUQUGBF44KAABUJ1699Hz69OmKiorSggULnG3NmjVz/t8Yo9mzZ2vy5MkaMGCAJGnRokUKDw/X0qVLNXLkSB07dkzz5s3TG2+8oR49ekiSFi9erKioKK1evVo9e/a8qMcEAACqF6/O7Hz44Yfq0KGD7r77bjVq1Ejt2rXTa6+95nx8z549ysrKUkJCgrPN399fXbp00caNGyVJGRkZcjgcLn0iIyPVpk0bZx8AAHDp8urMzi+//KK5c+dq3LhxevLJJ7V582Y99thj8vf31+DBg5WVlSVJCg8Pd1kvPDxc+/btkyRlZWXJz89P9evXL9anaP3z5eXlKS8vz7mcm5sr6dwHOTkcDreOpWg9d9cHY1gZGEPPMH6eYww9xxiWX3nHyKthp7CwUB06dFBKSookqV27dtqxY4fmzp2rwYMHO/ud/8mIxpgyPy3xQn2mTZumKVOmFGtPS0tTUFBQRQ/DRXp6ukfrgzGsDIyhZxg/zzGGnmMMy3bq1Kly9fNq2GncuLFat27t0nbllVfq3XfflSRFRERIOjd707hxY2ef7Oxs52xPRESE8vPzlZOT4zK7k52drU6dOpW430mTJmncuHHO5aJ7ayQkJHh0u4j09HTFx8fz8d5uYgw9xxh6hvHzHGPoOcaw/IremSmLV8POTTfdpJ07d7q07dq1S9HR0ZKkmJgYRUREKD09Xe3atZMk5efna/369Zo+fbokqX379rLb7UpPT9fAgQMlSZmZmdq+fbtmzJhR4n79/f3l7+9frN1ut3v8wqqMbVzqGEPPMYaeYfw8xxh6jjEsW3nHx6th5/HHH1enTp2UkpKigQMHavPmzXr11Vf16quvSjr39lVSUpJSUlIUGxur2NhYpaSkKCgoSPfff78kqW7duho+fLjGjx+v0NBQNWjQQBMmTFBcXJzz6iwAAHDp8mrYue666/T+++9r0qRJevrppxUTE6PZs2dr0KBBzj4TJ07U6dOnNWrUKOXk5Khjx45KS0tzuZ37Cy+8IF9fXw0cOFCnT59W9+7dtXDhQvn4+HjjsAAAQDXi1bAjSYmJiUpMTCz1cZvNpuTkZCUnJ5faJyAgQKmpqUpNTa2CCgEAQE3m9bAD4MK6dSu7z9q1VV8HANRUXr83FgAAQFUi7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEsj7AAAAEvz9XYBAC6Obt3K7rN2bdXXAQAXGzM7AADA0gg7AADA0gg7AADA0gg7AADA0gg7AADA0gg7AADA0gg7AADA0gg7AADA0gg7AADA0gg7AADA0gg7AADA0rwadpKTk2Wz2Vy+IiIinI8bY5ScnKzIyEgFBgaqa9eu2rFjh8s28vLyNGbMGIWFhSk4OFi33XabDh48eLEPBQAAVFNen9m56qqrlJmZ6fz6/vvvnY/NmDFDs2bN0pw5c7RlyxZFREQoPj5ex48fd/ZJSkrS+++/r+XLl2vDhg06ceKEEhMTVVBQ4I3DAQAA1YzX73ru6+vrMptTxBij2bNna/LkyRowYIAkadGiRQoPD9fSpUs1cuRIHTt2TPPmzdMbb7yhHj16SJIWL16sqKgorV69Wj179ryoxwIAAKofr4edn376SZGRkfL391fHjh2VkpKiyy+/XHv27FFWVpYSEhKcff39/dWlSxdt3LhRI0eOVEZGhhwOh0ufyMhItWnTRhs3biw17OTl5SkvL8+5nJubK0lyOBxyOBxuHUfReu6uD8awNH5+ZfcpGrILjWFFtnOp4jXoOcbQc4xh+ZV3jGzGGFPFtZTq008/1alTp9SiRQv99ttvmjp1qv79739rx44d2rlzp2666Sb9+uuvioyMdK7zl7/8Rfv27dOqVau0dOlSPfTQQy7BRZISEhIUExOjV155pcT9Jicna8qUKcXaly5dqqCgoMo9SAAAUCVOnTql+++/X8eOHVOdOnVK7efVmZ3evXs7/x8XF6cbb7xRzZs316JFi3TDDTdIkmw2m8s6xphibecrq8+kSZM0btw453Jubq6ioqKUkJBwwcG6EIfDofT0dMXHx8tut7u1jUsdY1iyxMSy+3z88bl/LzSGFdnOpYrXoOcYQ88xhuVX9M5MWbz+NtafBQcHKy4uTj/99JPuuOMOSVJWVpYaN27s7JOdna3w8HBJUkREhPLz85WTk6P69eu79OnUqVOp+/H395e/v3+xdrvd7vELqzK2caljDF3l55fd5/zhKmkM3dnOpYrXoOcYQ88xhmUr7/h4/WqsP8vLy9OPP/6oxo0bKyYmRhEREUpPT3c+np+fr/Xr1zuDTPv27WW32136ZGZmavv27RcMOwAA4NLh1ZmdCRMmqF+/fmratKmys7M1depU5ebmasiQIbLZbEpKSlJKSopiY2MVGxurlJQUBQUF6f7775ck1a1bV8OHD9f48eMVGhqqBg0aaMKECYqLi3NenQUAAC5tXg0
7Bw8e1H333afDhw+rYcOGuuGGG/TVV18pOjpakjRx4kSdPn1ao0aNUk5Ojjp27Ki0tDSFhIQ4t/HCCy/I19dXAwcO1OnTp9W9e3ctXLhQPj4+3josAABQjXg17CxfvvyCj9tsNiUnJys5ObnUPgEBAUpNTVVqamolVwcAAKygWp2zAwAAUNkIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNKq1V3PAbinW7dz//r5SaNGSYmJ5bvLOQBcCpjZAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAllZtws60adNks9mUlJTkbDPGKDk5WZGRkQoMDFTXrl21Y8cOl/Xy8vI0ZswYhYWFKTg4WLfddpsOHjx4kasHAADVVbUIO1u2bNGrr76qq6++2qV9xowZmjVrlubMmaMtW7YoIiJC8fHxOn78uLNPUlKS3n//fS1fvlwbNmzQiRMnlJiYqIKCgot9GAAAoBryetg5ceKEBg0apNdee03169d3thtjNHv2bE2ePFkDBgxQmzZttGjRIp06dUpLly6VJB07dkzz5s3TzJkz1aNHD7Vr106LFy/W999/r9WrV3vrkAAAQDXi6+0CRo8erb59+6pHjx6aOnWqs33Pnj3KyspSQkKCs83f319dunTRxo0bNXLkSGVkZMjhcLj0iYyMVJs2bbRx40b17NmzxH3m5eUpLy/PuZybmytJcjgccjgcbh1H0Xrurg/GsDR+fuXva7c7XP6tqEt96HkNeo4x9BxjWH7lHSOvhp3ly5frm2++0ZYtW4o9lpWVJUkKDw93aQ8PD9e+ffucffz8/FxmhIr6FK1fkmnTpmnKlCnF2tPS0hQUFFTh4/iz9PR0j9YHY3i+UaMqvs6IEe6N4SefuLWa5fAa9Bxj6DnGsGynTp0qVz+vhZ0DBw5o7NixSktLU0BAQKn9bDaby7Ixpljb+crqM2nSJI0bN865nJubq6ioKCUkJKhOnTrlPAJXDodD6enpio+Pl91ud2sblzrGsGSJieXva7c7NGJEul57LV4OR8XH8OOPK7yKpfAa9Bxj6DnGsPyK3pkpi9fCTkZGhrKzs9W+fXtnW0FBgT7//HPNmTNHO3fulHRu9qZx48bOPtnZ2c7ZnoiICOXn5ysnJ8dldic7O1udOnUqdd/+/v7y9/cv1m632z1+YVXGNi51jKGr/PyKr+Nw2JWfX/ExZNjP4TXoOcbQc4xh2co7Pl47Qbl79+76/vvvtXXrVudXhw4dNGjQIG3dulWXX365IiIiXKbx8vPztX79emeQad++vex2u0ufzMxMbd++/YJhBwAAXDq8NrMTEhKiNm3auLQFBwcrNDTU2Z6UlKSUlBTFxsYqNjZWKSkpCgoK0v333y9Jqlu3roYPH67x48crNDRUDRo00IQJExQXF6cePXpc9GMCAADVj9evxrqQiRMn6vTp0xo1apRycnLUsWNHpaWlKSQkxNnnhRdekK+vrwYOHKjTp0+re/fuWrhwoXx8fLxYOQAAqC6qVdhZt26dy7LNZlNycrKSk5NLXScgIECpqalKTU2t2uIAAECN5PUPFQQAAKhKhB0AAGBphB0AAGBphB0AAGBphB0AAGBphB0AAGBphB0AAGBphB0AAGBphB0AAGBphB0AAGBp1ep2EQCqv27dyu6zdm3V1wEA5cXMDgAAsDTCDgAAsDTCDgAAsDS3ws6ePXsquw4AAIAq4dYJyldccYVuueUWDR8+XHfddZcCAgIquy6gxuNEXgCoHtya2fnuu+/Url07jR8/XhERERo5cqQ2b95c2bUBAAB4zK2w06ZNG82aNUu//vqrFixYoKysLHXu3FlXXXWVZs2apd9//72y6wQAAHCLRyco+/r6qn///nrrrbc0ffp07d69WxMmTFCTJk00ePBgZWZmVladAAAAbvEo7Hz99dcaNWqUGjdurFmzZmnChAnavXu31qxZo19//VW33357ZdUJAADgFrdOUJ41a5YWLFignTt3qk+fPnr99dfVp08f1ap1LjvFxMTolVdeUatWrSq1WAAAgIpyK+zMnTtXw4YN00MPPaSIiIgS+zRt2lTz5s3zqDgAAABPuRV2fvrppzL7+Pn5aciQIe5sHgAAoNK4dc7OggUL9Pbbbxdrf/vtt7Vo0SKPiwIAAKgsboWdZ599VmFhYcXaGzVqpJSUFI+LAgAAqCxuhZ19+/YpJiamWHt0dLT279/vcVEAAACVxa2w06hRI23btq1Y+3fffafQ0FCPiwIAAKgsboWde++9V4899pjWrl2rgoICFRQUaM2aNRo7dqzuvffeyq4RAADAbW5djTV16lTt27dP3bt3l6/vuU0UFhZq8ODBnLMDAACqFbfCjp+fn9588039/e9/13fffafAwEDFxcUpOjq6susDAADwiFthp0iLFi3UokWLyqoFAACg0rkVdgoKCrRw4UJ99tlnys7OVmFhocvja9asqZTiAAAAPOVW2Bk7dqwWLlyovn37qk2bNrLZbJVdFwAAQKVwK+wsX75cb731lvr06VPZ9QAAAFQqty499/Pz0xVXXFHZtQAAAFQ6t8LO+PHj9eKLL8oYU9n1AAAAVCq33sbasGGD1q5dq08//VRXXXWV7Ha7y+PvvfdepRQHAADgKbfCTr169dS/f//KrgUAAKDSuRV2FixYUNl1AAAAVAm3ztmRpLNnz2r16tV65ZVXdPz4cUnSoUOHdOLEiUorDgAAwFNuzezs27dPvXr10v79+5WXl6f4+HiFhIRoxowZOnPmjF5++eXKrhMAAMAtbs3sjB07Vh06dFBOTo4CAwOd7f3799dnn31WacUBAAB4yu2rsb788kv5+fm5tEdHR+vXX3+tlMIAAAAqg1thp7CwUAUFBcXaDx48qJCQEI+LAi4V3bp5uwIAsD633saKj4/X7Nmzncs2m00nTpzQU089xS0kAABAteLWzM4LL7ygbt26qXXr1jpz5ozuv/9+/fTTTwoLC9OyZcsqu0YAAAC3uRV2IiMjtXXrVi1btkzffPONCgsLNXz4cA0aNMjlhGUAAABvcyvsSFJgYKCGDRumYcOGVWY9AAAAlcqtsPP6669f8PHBgweXaztz587V3LlztXfvXknSVVddpb/97W/q3bu3JMkYoylTpujVV19VTk6OOnbsqH/+85+66qqrnNvIy8vThAkTtGzZMp0+fVrdu3fXSy+9pCZNmrhzaAAAwGLcCjtjx451WXY4HDp16pT8/PwUFBRU7rDTpE
kTPfvss7riiiskSYsWLdLtt9+ub7/9VldddZVmzJihWbNmaeHChWrRooWmTp2q+Ph47dy503nVV1JSkj766CMtX75coaGhGj9+vBITE5WRkSEfHx93Dg8AAFiIW1dj5eTkuHydOHFCO3fuVOfOnSt0gnK/fv3Up08ftWjRQi1atNAzzzyj2rVr66uvvpIxRrNnz9bkyZM1YMAAtWnTRosWLdKpU6e0dOlSSdKxY8c0b948zZw5Uz169FC7du20ePFiff/991q9erU7hwYAACzG7XtjnS82NlbPPvtssVmf8iooKNDy5ct18uRJ3XjjjdqzZ4+ysrKUkJDg7OPv768uXbpo48aNkqSMjAw5HA6XPpGRkWrTpo2zDwAAuLS5fYJySXx8fHTo0KEKrfP999/rxhtv1JkzZ1S7dm29//77at26tTOshIeHu/QPDw/Xvn37JElZWVny8/NT/fr1i/XJysoqdZ95eXnKy8tzLufm5ko693acw+GoUP1FitZzd31YbwzP+4Dxi8Jud7j8W1HlGfryHFdNfQqt9hr0BsbQc4xh+ZV3jNwKOx9++KHLsjFGmZmZmjNnjm666aYKbatly5baunWr/vjjD7377rsaMmSI1q9f73zcZrMV29f5becrq8+0adM0ZcqUYu1paWkKCgqqUP3nS09P92h9WGcMR43y3r5HjHBvDD/5pOw+5Tmu8mynOrPKa9CbGEPPMYZlO3XqVLn6uRV27rjjDpdlm82mhg0b6tZbb9XMmTMrtC0/Pz/nCcodOnTQli1b9OKLL+qJJ56QdG72pnHjxs7+2dnZztmeiIgI5efnKycnx2V2Jzs7W506dSp1n5MmTdK4ceOcy7m5uYqKilJCQoLq1KlTofqLOBwOpaenKz4+Xna73a1tXOqsNoaJiRd/n3a7QyNGpOu11+LlcHhvDD/+2Gu79ojVXoPewBh6jjEsv6J3Zsri9r2xqooxRnl5eYqJiVFERITS09PVrl07SVJ+fr7Wr1+v6dOnS5Lat28vu92u9PR0DRw4UJKUmZmp7du3a8aMGaXuw9/fX/7+/sXa7Xa7xy+sytjGpc4qY5if7719Oxx25ed7bwxr+tNnldegNzGGnmMMy1be8anUc3Yq6sknn1Tv3r0VFRWl48ePa/ny5Vq3bp1Wrlwpm82mpKQkpaSkKDY2VrGxsUpJSVFQUJDuv/9+SVLdunU1fPhwjR8/XqGhoWrQoIEmTJiguLg49ejRw5uHBgAAqgm3ws6f3wIqy6xZs0p97LffftODDz6ozMxM1a1bV1dffbVWrlyp+Ph4SdLEiRN1+vRpjRo1yvmhgmlpaS53Vn/hhRfk6+urgQMHOj9UcOHChXzGDgAAkORm2Pn222/1zTff6OzZs2rZsqUkadeuXfLx8dG1117r7FfWicTz5s274OM2m03JyclKTk4utU9AQIBSU1OVmppa/gMAAACXDLfCTr9+/RQSEqJFixY5TwzOycnRQw89pJtvvlnjx4+v1CIBAADc5daHCs6cOVPTpk1zuQKqfv36mjp1aoWvxgIAAKhKboWd3Nxc/fbbb8Xas7Ozdfz4cY+LAgAAqCxuhZ3+/fvroYce0jvvvKODBw/q4MGDeueddzR8+HANGDCgsmsEAABwm1vn7Lz88suaMGGCHnjgAedHNfv6+mr48OF67rnnKrVAAAAAT7gVdoKCgvTSSy/pueee0+7du2WM0RVXXKHg4ODKrg8AAMAjHt31PDMzU5mZmWrRooWCg4NljKmsugAAACqFW2HnyJEj6t69u1q0aKE+ffooMzNTkvTwww9z2TkAAKhW3Ao7jz/+uOx2u/bv3+9yl/B77rlHK1eurLTiAAAAPOXWOTtpaWlatWqVmjRp4tIeGxurffv2VUphAAAAlcGtmZ2TJ0+6zOgUOXz4cIl3EwcAAPAWt8LOLbfcotdff925bLPZVFhYqOeee07dunWrtOIAAAA85dbbWM8995y6du2qr7/+Wvn5+Zo4caJ27Niho0eP6ssvv6zsGgEAANzm1sxO69attW3bNl1//fWKj4/XyZMnNWDAAH377bdq3rx5ZdcIAADgtgrP7DgcDiUkJOiVV17RlClTqqImAACASlPhmR273a7t27fLZrNVRT0AAACVyq23sQYPHqx58+ZVdi0AAACVzq0TlPPz8/Wvf/1L6enp6tChQ7F7Ys2aNatSigMAAPBUhcLOL7/8ombNmmn79u269tprJUm7du1y6cPbWwAAoDqpUNiJjY1VZmam1q5dK+nc7SH+8Y9/KDw8vEqKAwAA8FSFztk5/67mn376qU6ePFmpBQEAAFQmt05QLnJ++AEAAKhuKhR2bDZbsXNyOEcHAABUZxU6Z8cYo6FDhzpv9nnmzBk98sgjxa7Geu+99yqvQgAAAA9UKOwMGTLEZfmBBx6o1GIAAAAqW4XCzoIFC6qqDgAAgCrh0QnKAAAA1R1hBwAAWBphBwAAWBphBwAAWBphBwAAWBphBwAAWBphBwAAWBphBwAAWBphBwAAWBphBwAAWBphBwAAWBphBwAAWBphBwAAWFqF7noOAOXRrVvZfdaurfo6AEBiZgcAAFgcYQcAAFgaYQcAAFgaYQcAAFgaYQcAAFgaYQcAAFgaYQcAAFgaYQcAAFgaYQcAAFiaV8POtGnTdN111ykkJESNGjXSHXfcoZ07d7r0McYoOTlZkZGRCgwMVNeuXbVjxw6XPnl5eRozZozCwsIUHBys2267TQcPHryYhwIAAKopr94uYv369Ro9erSuu+46nT17VpMnT1ZCQoJ++OEHBQcHS5JmzJihWbNmaeHChWrRooWmTp2q+Ph47dy5UyEhIZKkpKQkffTRR1q+fLlCQ0M1fvx4JSYmKiMjQz4+Pt48RFhUeW6HAACoHrwadlauXOmyvGDBAjVq1EgZGRm65ZZbZIzR7NmzNXnyZA0YMECStGjRIoWHh2vp0qUaOXKkjh07pnnz5umNN95Qjx49JEmLFy9WVFSUVq9erZ49e1704wIAANVHtboR6LFjxyRJDRo0kCTt2bNHWVlZSkhIcPbx9/dXly5dtHHjRo0cOVIZGRlyOBwufSIjI9WmTRtt3LixxLCTl5envLw853Jubq4kyeFwyOFwuFV70Xruro+aNYZ+ft6uoGR2u8Pl3+qsOj7NNek1WF0xhp5jDMuvvGNUbcKOMUbjxo1T586d1aZNG0lSVlaWJCk8PNylb3h4uPbt2+fs4+fnp/r16xfrU7T++aZNm6YpU6YUa09LS1NQUJBHx5Genu7R+qgZYzhqlLcruLARI6r/GH7yibcrKF1NeA1Wd4yh5xjDsp06dapc/apN2Hn00Ue1bds2bdiwodhjNpvNZdkYU6ztfBfqM2nSJI0bN865nJubq6ioKCUkJKhOnTpuVH8uXaanpys+Pl52u92tbVzqatIYJiZ6u4KS2e0OjRiRrtdei5fDUb3H8OOPvV1BcTXpNVhdMYaeYwzLr+idmbJUi7AzZswYffjhh/r888/VpEkTZ3tERISkc7M3jRs3drZnZ2c7Z3siIiKUn
5+vnJwcl9md7OxsderUqcT9+fv7y9/fv1i73W73+IVVGdu41NWEMczP93YFF+Zw2JWfX73HsDo/xTXhNVjdMYaeYwzLVt7x8eql58YYPfroo3rvvfe0Zs0axcTEuDweExOjiIgIl6m8/Px8rV+/3hlk2rdvL7vd7tInMzNT27dvLzXsAACAS4dXZ3ZGjx6tpUuX6oMPPlBISIjzHJu6desqMDBQNptNSUlJSklJUWxsrGJjY5WSkqKgoCDdf//9zr7Dhw/X+PHjFRoaqgYNGmjChAmKi4tzXp0FAAAuXV4NO3PnzpUkde3a1aV9wYIFGjp0qCRp4sSJOn36tEaNGqWcnBx17NhRaWlpzs/YkaQXXnhBvr6+GjhwoE6fPq3u3btr4cKFfMYOAADwbtgxxpTZx2azKTk5WcnJyaX2CQgIUGpqqlJTUyuxOgAAYAXcGwsAAFgaYQcAAFgaYQcAAFhatficHaA64SafAGAtzOwAAABLI+wAAABLI+wAAABLI+wAAABLI+wAAABLI+wAAABL49JzAF5Rnkv8166t+joAWB8zOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNIIOwAAwNK8GnY+//xz9evXT5GRkbLZbFqxYoXL48YYJScnKzIyUoGBgeratat27Njh0icvL09jxoxRWFiYgoODddttt+ngwYMX8SgAAEB15tWwc/LkSV1zzTWaM2dOiY/PmDFDs2bN0pw5c7RlyxZFREQoPj5ex48fd/ZJSkrS+++/r+XLl2vDhg06ceKEEhMTVVBQcLEOAwAAVGO+3tx579691bt37xIfM8Zo9uzZmjx5sgYMGCBJWrRokcLDw7V06VKNHDlSx44d07x58/TGG2+oR48ekqTFixcrKipKq1evVs+ePS/asQAAgOrJq2HnQvbs2aOsrCwlJCQ42/z9/dWlSxdt3LhRI0eOVEZGhhwOh0ufyMhItWnTRhs3biw17OTl5SkvL8+5nJubK0lyOBxyOBxu1Vu0nrvro/qMoZ+fV3fvEbvd4fJvTXexXwrV5TVYkzGGnmMMy6+8Y1Rtw05WVpYkKTw83KU9PDxc+/btc/bx8/NT/fr1i/UpWr8k06ZN05QpU4q1p6WlKSgoyKO609PTPVof3h/DUaO8uvtKMWKENV6Hn3zinf16+zVoBYyh5xjDsp06dapc/apt2Clis9lclo0xxdrOV1afSZMmady4cc7l3NxcRUVFKSEhQXXq1HGrTofDofT0dMXHx8tut7u1jUtddRnDxESv7dpjdrtDI0ak67XX4uVw1PzX4ccfX9z9VZfXYE3GGHqOMSy/ondmylJtw05ERISkc7M3jRs3drZnZ2c7Z3siIiKUn5+vnJwcl9md7OxsderUqdRt+/v7y9/fv1i73W73+IVVGdu41Hl7DPPzvbbrSuNw2JWfX/Nfh956GXj7NWgFjKHnGMOylXd8qu3n7MTExCgiIsJlGi8/P1/r1693Bpn27dvLbre79MnMzNT27dsvGHYAAMClw6szOydOnNDPP//sXN6zZ4+2bt2qBg0aqGnTpkpKSlJKSopiY2MVGxurlJQUBQUF6f7775ck1a1bV8OHD9f48eMVGhqqBg0aaMKECYqLi3NenQUAAC5tXg07X3/9tbp16+ZcLjqPZsiQIVq4cKEmTpyo06dPa9SoUcrJyVHHjh2VlpamkJAQ5zovvPCCfH19NXDgQJ0+fVrdu3fXwoUL5ePjc9GPBwAAVD9eDTtdu3aVMabUx202m5KTk5WcnFxqn4CAAKWmpio1NbUKKgQAADVdtT1nBwAAoDIQdgAAgKURdgAAgKURdgAAgKURdgAAgKURdgAAgKVV29tFAMCfPoarVGvXVn0dAGo2ZnYAAIClEXYAAIClEXYAAIClEXYAAIClEXYAAIClEXYAAIClEXYAAIClEXYAAICl8aGCuKSU50PqAADWwswOAACwNMIOAACwNMIOAACwNM7ZAVCjcbNQAGVhZgcAAFgaYQcAAFgaYQcAAFga5+zAMvgMHQBASZjZAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlkbYAQAAlsZdzwFYXrduZfdZu7bq6wDgHczsAAAAS2NmBzVCef4yBwCgJMzsAAAASyPsAAAASyPsAAAAS+OcHXhd0fk4fn7SqFFSYqKUn+/dmnDp6dat7NcgV2wBNRMzOwAAwNIIOwAAwNIIOwAAwNI4Zwdu41NpcanhNQ/UTJaZ2XnppZcUExOjgIAAtW/fXl988YW3SwIAANWAJcLOm2++qaSkJE2ePFnffvutbr75ZvXu3Vv79+/3dmkAAMDLLPE21qxZszR8+HA9/PDDkqTZs2dr1apVmjt3rqZNm+bl6i5t3OYBKI63w4CLq8aHnfz8fGVkZOi///u/XdoTEhK0ceNGL1UF4FJFwAeqnxofdg4fPqyCggKFh4e7tIeHhysrK6vEdfLy8pSXl+dcPnbsmCTp6NGjcjgcbtXhcDh06tQpHTlyRHa73dk+cGDZ6771llu7rFLlqbtWpb8Jem4MpSOqVcteZm+UhDH0TPUZv+7dy+5zMX92lPdnWWk/C71VT010scawsnjzuTh+/LgkyRhzwX41PuwUsdlsLsvGmGJtRaZNm6YpU6YUa4+JiamS2soSFuaV3VZLaWnerqDmYww9U5PGr7r97KAelKaqn4vjx4+rbt26pT5e48NOWFiYfHx8is3iZGdnF5vtKTJp0iSNGzfOuVxYWKijR48qNDS01IBUltzcXEVFRenAgQOqU6eOW9u41DGGnmMMPcP4eY4x9BxjWH7GGB0/flyRkZEX7Ffjw46fn5/at2+v9PR09e/f39menp6u22+/vcR1/P395e/v79JWr169SqmnTp06vDg9xBh6jjH0DOPnOcbQc4xh+VxoRqdIjQ87kjRu3Dg9+OCD6tChg2688Ua9+uqr2r9/vx555BFvlwYAALzMEmHnnnvu0ZEjR/T0008rMzNTbdq00SeffKLo6GhvlwYAALzMEmFHkkaNGqVRo0Z5bf/+/v566qmnir09hvJjDD3HGHqG8fMcY+g5xrDy2UxZ12sBAADUYJa4XQQAAEBpCDsAAMDSCDsAAMDSCDsAAMDSCDtVLC8vT23btpXNZtPWrVu9XU6NsHfvXg0fPlwxMTEKDAxU8+bN9dRTTyk/P9/bpVVrL730kmJiYhQQEKD27dvriy++8HZJNca0adN03XXXKSQkRI0a
NdIdd9yhnTt3erusGmvatGmy2WxKSkrydik1yq+//qoHHnhAoaGhCgoKUtu2bZWRkeHtsiyBsFPFJk6cWObHWMPVv//9bxUWFuqVV17Rjh079MILL+jll1/Wk08+6e3Sqq0333xTSUlJmjx5sr799lvdfPPN6t27t/bv3+/t0mqE9evXa/To0frqq6+Unp6us2fPKiEhQSdPnvR2aTXOli1b9Oqrr+rqq6/2dik1Sk5Ojm666SbZ7XZ9+umn+uGHHzRz5sxK+3T/S55Blfnkk09Mq1atzI4dO4wk8+2333q7pBprxowZJiYmxttlVFvXX3+9eeSRR1zaWrVqZf77v//bSxXVbNnZ2UaSWb9+vbdLqVGOHz9uYmNjTXp6uunSpYsZO3ast0uqMZ544gnTuXNnb5dhWczsVJHffvtNI0aM0BtvvKGgoCBvl1PjHTt2TA0aNPB2GdVSfn6+MjIylJCQ4NKekJCgjRs3eqmqmu3YsWOSxGuugkaPHq2+ffuqR48e3i6lxvnwww/VoUMH3X333WrUqJHatWun1157zdtlWQZhpwoYYzR06FA98sgj6tChg7fLqfF2796t1NRU7nVWisOHD6ugoEDh4eEu7eHh4crKyvJSVTWXMUbjxo1T586d1aZNG2+XU2MsX75c33zzjaZNm+btUmqkX375RXPnzlVsbKxWrVqlRx55RI899phef/11b5dmCYSdCkhOTpbNZrvg19dff63U1FTl5uZq0qRJ3i65Winv+P3ZoUOH1KtXL9199916+OGHvVR5zWCz2VyWjTHF2lC2Rx99VNu2bdOyZcu8XUqNceDAAY0dO1aLFy9WQECAt8upkQoLC3XttdcqJSVF7dq108iRIzVixAjNnTvX26VZgmXujXUxPProo7r33nsv2KdZs2aaOnWqvvrqq2L3NenQoYMGDRqkRYsWVWWZ1VZ5x6/IoUOH1K1bN+ed7FGysLAw+fj4FJvFyc7OLjbbgwsbM2aMPvzwQ33++edq0qSJt8upMTIyMpSdna327ds72woKCvT5559rzpw5ysvLk4+PjxcrrP4aN26s1q1bu7RdeeWVevfdd71UkbUQdiogLCxMYWFhZfb7xz/+oalTpzqXDx06pJ49e+rNN99Ux44dq7LEaq284yeduwSzW7duat++vRYsWKBatZiELI2fn5/at2+v9PR09e/f39menp6u22+/3YuV1RzGGI0ZM0bvv/++1q1bp5iYGG+XVKN0795d33//vUvbQw89pFatWumJJ54g6JTDTTfdVOzjDnbt2qXo6GgvVWQthJ0q0LRpU5fl2rVrS5KaN2/OX4vlcOjQIXXt2lVNmzbV888/r99//935WEREhBcrq77GjRunBx98UB06dHDOhO3fv5/znMpp9OjRWrp0qT744AOFhIQ4Z8nq1q2rwMBAL1dX/YWEhBQ7vyk4OFihoaGc91ROjz/+uDp16qSUlBQNHDhQmzdv1quvvsqsdiUh7KDaSUtL088//6yff/65WDg0xnipqurtnnvu0ZEjR/T0008rMzNTbdq00SeffMJfheVUdF5E165dXdoXLFigoUOHXvyCcMm57rrr9P7772vSpEl6+umnFRMTo9mzZ2vQoEHeLs0SbIbfHgAAwMI4EQIAAFgaYQcAAFgaYQcAAFgaYQcAAFgaYQcAAFgaYQcAAFgaYQcAAFgaYQeAV9lsNq1YscK5/O9//1s33HCDAgIC1LZt21LbAHjH559/rn79+ikyMrLY929VaNasWYk3jh49enS5t0HYAVAlhg4d6vyhZLfbFR4ervj4eM2fP1+FhYXOfpmZmerdu7dz+amnnlJwcLB27typzz77rNQ2AN5x8uRJXXPNNZozZ85F2d+WLVuUmZnp/EpPT5ck3X333eXeBmEHQJXp1auXMjMztXfvXn366afq1q2bxo4dq8TERJ09e1bSufud+fv7O9fZvXu3OnfurOjoaIWGhpbaVlH5+fmeHxAA9e7dW1OnTtWAAQNKfDw/P18TJ07UZZddpuDgYHXs2FHr1q1ze38NGzZURESE8+vjjz9W8+bN1aVLl3Jvg7ADoMr4+/srIiJCl112ma699lo9+eST+uCDD/Tpp59q4cKFklzfxrLZbMrIyNDTTz8tm82m5OTkEtsk6ddff9U999yj+vXrKzQ0VLfffrv27t3r3PfQoUN1xx13aNq0aYqMjFSLFi0qtN7zzz+vxo0bKzQ0VKNHj5bD4XD2ycvL08SJExUVFSV/f3/FxsZq3rx5zsd/+OEH9enTR7Vr11Z4eLgefPBBHT58uErGGKhuHnroIX355Zdavny5tm3bprvvvlu9evXSTz/95PG28/PztXjxYg0bNkw2m63c6xF2AFxUt956q6655hq99957xR7LzMzUVVddpfHjxyszM1MTJkwose3UqVPq1q2bateurc8//1wbNmxQ7dq11atXL5cZnM8++0w//vij0tPT9fHHH5d7vbVr12r37t1au3atFi1apIULFzrDmSQNHjxYy5cv1z/+8Q/9+OOPevnll1W7dm3nMXTp0kVt27bV119/rZUrV+q3337TwIEDq25QgWpi9+7dWrZsmd5++23dfPPNat68uSZMmKDOnTtrwYIFHm9/xYoV+uOPPyp8g17ueg7gomvVqpW2bdtWrD0iIkK+vr6qXbu2IiIiJEm1a9cu1jZ//nzVqlVL//rXv5x/3S1YsED16tXTunXrlJCQIEkKDg7Wv/71L/n5+VVovfr162vOnDny8fFRq1at1LdvX3322WcaMWKEdu3apbfeekvp6enq0aOHJOnyyy93HsPcuXN17bXXKiUlxdk2f/58RUVFadeuXc4ZJsCKvvnmGxljir3O8/LynG9B7927VzExMRfczujRo0s8J2jevHnq3bu3IiMjK1QXYQfARWeMqdAU9PkyMjL0888/KyQkxKX9zJkz2r17t3M5Li7OGXQqst5VV10lHx8f53Ljxo31/fffS5K2bt0qHx+fUs8XyMjI0Nq1a50zPX+2e/duwg4srbCwUD4+PsrIyHD5HpLk/J647LLL9OOPP15wO/Xr1y/Wtm/fPq1evbrEWeGyEHYAXHQ//vhjmX/ZXUhhYaHat2+vJUuWFHusYcOGzv8HBwe7tZ7dbnd5zGazOa8gCwwMLLO2fv36afr06cUea9y48QXXBWq6du3aqaCgQNnZ2br55ptL7GO329WqVasKb3vBggVq1KiR+vbtW+F1CTsALqo1a9bo+++/1+OPP+72Nq699lq9+eabatSokerUqVPl6/1ZXFycCgsLtX79eufbWOfv491331WzZs3k68uPWFjPiRMn9PPPPzuX9+zZo61bt6pBgwZq0aKFBg0apMGDB2vmzJlq166dDh8+rDVr1iguLk59+vRxa5+FhYVasGCBhgwZ4tb3FScoA6gyeXl5ysrK0q+//qpvvvlGKSkpuv3225WYmKjBgwe7vd1BgwYpLCxMt99+u7744gvt2bNH69ev19ixY3Xw4MFKX+/PmjVrpiFDhmjYsGFasWKF9uzZo3Xr1um
tt96SdO5cg6NHj+q+++7T5s2b9csvvygtLU3Dhg1TQUGB28cMVBdff/212rVrp3bt2kmSxo0bp3bt2ulvf/ubpHMzMIMHD9b48ePVsmVL3Xbbbfq///s/RUVFub3P1atXa//+/Ro2bJhb6/NnB4Aqs3LlSjVu3Fi+vr6qX7++rrnmGv3jH//QkCFDVKuW+39rBQUF6fPPP9cTTzyhAQMG6Pjx47rsssvUvXv3C87YuLve+ebOnasnn3xSo0aN0pEjR9S0aVM9+eSTkqTIyEh9+eWXeuKJJ9SzZ0/l5eUpOjpavXr18uiYgeqia9euMsaU+rjdbteUKVM0ZcqUSttnQkLCBfdZFpvxZG0AAIBqjj8zAACApRF2AACApRF2AACApRF2AACApRF2AACApRF2AACApRF2AACApRF2AACApRF2AACApRF2AACApRF2AACApRF2AACApf1/NuhuLZJw5e0AAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "differences = tl_model.blocks[0].attn.forward(test_tensor, test_tensor, test_tensor) - hf_model.model.layers[0].self_attn.forward(test_tensor)[0]\n", + "\n", + "# Flatten the differences to create a one-dimensional tensor\n", + "flattened_differences = differences.flatten().cpu().detach().numpy()\n", + "\n", + "# Plot the histogram of the differences\n", + "plt.hist(flattened_differences, bins=50, alpha=0.75, color='blue')\n", + "plt.title('Differences Between Attention Outputs')\n", + "plt.xlabel('Difference')\n", + "plt.ylabel('Frequency')\n", + "plt.grid(True)\n", + "plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/demos/ARENA_Content.ipynb b/demos/ARENA_Content.ipynb new file mode 100644 index 0000000..fe54296 --- /dev/null +++ b/demos/ARENA_Content.ipynb @@ -0,0 +1,424 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# NBVAL_IGNORE_OUTPUT\n", + "import os\n", + "\n", + "# Janky code to do different setup when run in a Colab notebook vs VSCode\n", + "DEVELOPMENT_MODE = False\n", + "IN_GITHUB = os.getenv(\"GITHUB_ACTIONS\") == \"true\"\n", + "IN_GITHUB = True\n", + "try:\n", + " import google.colab\n", + "\n", + " IN_COLAB = True\n", + " print(\"Running as a Colab notebook\")\n", + "\n", + " # PySvelte is an unmaintained visualization library, use it as a backup if circuitsvis isn't working\n", + " # # Install another version of node that makes PySvelte work way faster\n", + " # !curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -; sudo apt-get install -y nodejs\n", + " # %pip install git+https://github.com/neelnanda-io/PySvelte.git\n", + "except:\n", + " IN_COLAB = False\n", + "\n", + "if not IN_GITHUB and not IN_COLAB:\n", + " print(\"Running as a Jupyter notebook - intended for development only!\")\n", + " from IPython import get_ipython\n", + "\n", + " ipython = get_ipython()\n", + " # Code to automatically update the HookedTransformer code as its edited without restarting the kernel\n", + " ipython.magic(\"load_ext autoreload\")\n", + " ipython.magic(\"autoreload 2\")\n", + "\n", + "if IN_GITHUB or IN_COLAB:\n", + " %pip install torch\n", + " %pip install git+https://github.com/TransformerLensOrg/TransformerLens.git@dev\n", + " \n", + "from transformer_lens import HookedTransformer, HookedTransformerConfig\n", + "import torch as t\n", + "\n", + "device = t.device(\"cuda\" if t.cuda.is_available() else \"cpu\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded pretrained model gpt2-small into HookedTransformer\n" + ] + } + ], + "source": [ + "# NBVAL_IGNORE_OUTPUT\n", + "\n", + "\n", + "reference_gpt2 = HookedTransformer.from_pretrained(\n", + " \"gpt2-small\",\n", + " fold_ln=False,\n", + " center_unembed=False,\n", + " center_writing_weights=False,\n", + " device=device,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + 
"data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "# [1.1] Transformer From Scratch\n", + "# 1️⃣ UNDERSTANDING INPUTS & OUTPUTS OF A TRANSFORMER\n", + "\n", + "sorted_vocab = sorted(list(reference_gpt2.tokenizer.vocab.items()), key=lambda n: n[1])\n", + "first_vocab = sorted_vocab[0]\n", + "assert isinstance(first_vocab, tuple)\n", + "assert isinstance(first_vocab[0], str)\n", + "first_vocab[1]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['<|endoftext|>', 'R', 'alph']" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "reference_gpt2.to_str_tokens(\"Ralph\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['<|endoftext|>', ' Ralph']" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "reference_gpt2.to_str_tokens(\" Ralph\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['<|endoftext|>', ' r', 'alph']" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "reference_gpt2.to_str_tokens(\" ralph\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['<|endoftext|>', 'ral', 'ph']" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "reference_gpt2.to_str_tokens(\"ralph\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([1, 35])" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "reference_text = \"I am an amazing autoregressive, decoder-only, GPT-2 style transformer. 
One day I will exceed human level intelligence and take over the world!\"\n", + "tokens = reference_gpt2.to_tokens(reference_text)\n", + "tokens.shape\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([1, 35, 50257])" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "logits, cache = reference_gpt2.run_with_cache(tokens, device=device)\n", + "logits.shape\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "' I'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "most_likely_next_tokens = reference_gpt2.tokenizer.batch_decode(logits.argmax(dim=-1)[0])\n", + "most_likely_next_tokens[-1]\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[('hook_embed', (1, 35, 768)),\n", + " ('hook_pos_embed', (1, 35, 768)),\n", + " ('ln_final.hook_normalized', (1, 35, 768)),\n", + " ('ln_final.hook_scale', (1, 35, 1))]" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# 2️⃣ CLEAN TRANSFORMER IMPLEMENTATION\n", + "\n", + "layer_0_hooks = [\n", + " (name, tuple(tensor.shape)) for name, tensor in cache.items() if \".0.\" in name\n", + "]\n", + "non_layer_hooks = [\n", + " (name, tuple(tensor.shape)) for name, tensor in cache.items() if \"blocks\" not in name\n", + "]\n", + "\n", + "sorted(non_layer_hooks, key=lambda x: x[0])\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[('blocks.0.attn.hook_attn_scores', (1, 12, 35, 35)),\n", + " ('blocks.0.attn.hook_k', (1, 35, 12, 64)),\n", + " ('blocks.0.attn.hook_pattern', (1, 12, 35, 35)),\n", + " ('blocks.0.attn.hook_q', (1, 35, 12, 64)),\n", + " ('blocks.0.attn.hook_v', (1, 35, 12, 64)),\n", + " ('blocks.0.attn.hook_z', (1, 35, 12, 64)),\n", + " ('blocks.0.hook_attn_out', (1, 35, 768)),\n", + " ('blocks.0.hook_mlp_out', (1, 35, 768)),\n", + " ('blocks.0.hook_resid_mid', (1, 35, 768)),\n", + " ('blocks.0.hook_resid_post', (1, 35, 768)),\n", + " ('blocks.0.hook_resid_pre', (1, 35, 768)),\n", + " ('blocks.0.ln1.hook_normalized', (1, 35, 768)),\n", + " ('blocks.0.ln1.hook_scale', (1, 35, 1)),\n", + " ('blocks.0.ln2.hook_normalized', (1, 35, 768)),\n", + " ('blocks.0.ln2.hook_scale', (1, 35, 1)),\n", + " ('blocks.0.mlp.hook_post', (1, 35, 3072)),\n", + " ('blocks.0.mlp.hook_pre', (1, 35, 3072))]" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "sorted(layer_0_hooks, key=lambda x: x[0])" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n" + ] + } + ], + "source": [ + "# NBVAL_IGNORE_OUTPUT\n", + "# [1.2] Intro to mech interp\n", + "# 2️⃣ FINDING INDUCTION HEADS\n", + "\n", + "cfg = HookedTransformerConfig(\n", + " d_model=768,\n", + " d_head=64,\n", + " n_heads=12,\n", + " n_layers=2,\n", + " n_ctx=2048,\n", + " d_vocab=50278,\n", + " attention_dir=\"causal\",\n", + " attn_only=True, # defaults to False\n", + " 
tokenizer_name=\"EleutherAI/gpt-neox-20b\", \n", + " seed=398,\n", + " use_attn_result=True,\n", + " normalization_type=None, # defaults to \"LN\", i.e. layernorm with weights & biases\n", + " positional_embedding_type=\"shortformer\"\n", + ")\n", + "model = HookedTransformer(cfg)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([1, 62, 50278])" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "\n", + "text = \"We think that powerful, significantly superhuman machine intelligence is more likely than not to be created this century. If current machine learning techniques were scaled up to this level, we think they would by default produce systems that are deceptive or manipulative, and that no solid plans are known for how to avoid this.\"\n", + "\n", + "logits, cache = model.run_with_cache(text, remove_batch_dim=True)\n", + "\n", + "logits.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "2" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cache[\"embed\"].ndim" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demos/Activation_Patching_in_TL_Demo.ipynb b/demos/Activation_Patching_in_TL_Demo.ipynb index e947680..3be728c 100644 --- a/demos/Activation_Patching_in_TL_Demo.ipynb +++ b/demos/Activation_Patching_in_TL_Demo.ipynb @@ -60,7 +60,7 @@ " print(\"Running as a Colab notebook\")\n", " %pip install git+https://github.com/TransformerLensOrg/TransformerLens.git\n", " # Install my janky personal plotting utils\n", - " %pip install git+https://github.com/TransformerLensOrg/neel-plotly.git\n", + " %pip install git+https://github.com/neelnanda-io/neel-plotly.git\n", "except:\n", " IN_COLAB = False\n", " print(\"Running as a Jupyter notebook - intended for development only!\")\n", diff --git a/demos/Attribution_Patching_Demo.ipynb b/demos/Attribution_Patching_Demo.ipynb index de7c708..2862fb9 100644 --- a/demos/Attribution_Patching_Demo.ipynb +++ b/demos/Attribution_Patching_Demo.ipynb @@ -1 +1 @@ -{"cells":[{"cell_type":"markdown","metadata":{},"source":["\n"," \"Open\n",""]},{"cell_type":"markdown","metadata":{},"source":[" # Attribution Patching Demo\n"," **Read [the accompanying blog post here](https://neelnanda.io/attribution-patching) for more context**\n"," This is an interim research report, giving a whirlwind tour of some unpublished work I did at Anthropic (credit to the then team - Chris Olah, Catherine Olsson, Nelson Elhage and Tristan Hume for help, support, and mentorship!)\n","\n"," The goal of this work is run activation patching at an industrial scale, by using gradient based attribution to approximate the technique - allow an arbitrary number of patches to be made on two forwards and a single backward pass\n","\n"," I have had less time than hoped to flesh out this investigation, but am writing up a rough investigation and comparison to standard activation patching on a few tasks to give a 
sense of the potential of this approach, and where it works vs falls down."]},{"cell_type":"markdown","metadata":{},"source":[" To use this notebook, go to Runtime > Change Runtime Type and select GPU as the hardware accelerator.\n","\n"," **Tips for reading this Colab:**\n"," * You can run all this code for yourself!\n"," * The graphs are interactive!\n"," * Use the table of contents pane in the sidebar to navigate\n"," * Collapse irrelevant sections with the dropdown arrows\n"," * Search the page using the search in the sidebar, not CTRL+F"]},{"cell_type":"markdown","metadata":{},"source":[" ## Setup (Ignore)"]},{"cell_type":"code","execution_count":1,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Running as a Jupyter notebook - intended for development only!\n"]},{"name":"stderr","output_type":"stream","text":["/var/folders/m3/z6c6rcdj1rbb2jh9vqpgvxg40000gn/T/ipykernel_25358/2480103146.py:24: DeprecationWarning: `magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n"," ipython.magic(\"load_ext autoreload\")\n","/var/folders/m3/z6c6rcdj1rbb2jh9vqpgvxg40000gn/T/ipykernel_25358/2480103146.py:25: DeprecationWarning: `magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n"," ipython.magic(\"autoreload 2\")\n"]}],"source":["# Janky code to do different setup when run in a Colab notebook vs VSCode\n","import os\n","\n","DEBUG_MODE = False\n","IN_GITHUB = os.getenv(\"GITHUB_ACTIONS\") == \"true\"\n","try:\n"," import google.colab\n","\n"," IN_COLAB = True\n"," print(\"Running as a Colab notebook\")\n","except:\n"," IN_COLAB = False\n"," print(\"Running as a Jupyter notebook - intended for development only!\")\n"," from IPython import get_ipython\n","\n"," ipython = get_ipython()\n"," # Code to automatically update the HookedTransformer code as its edited without restarting the kernel\n"," ipython.magic(\"load_ext autoreload\")\n"," ipython.magic(\"autoreload 2\")\n","\n","if IN_COLAB or IN_GITHUB:\n"," %pip install transformer_lens\n"," %pip install torchtyping\n"," # Install my janky personal plotting utils\n"," %pip install git+https://github.com/TransformerLensOrg/neel-plotly.git\n"," # Install another version of node that makes PySvelte work way faster\n"," %pip install circuitsvis\n"," # Needed for PySvelte to work, v3 came out and broke things...\n"," %pip install typeguard==2.13.3"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Plotly needs a different renderer for VSCode/Notebooks vs Colab argh\n","import plotly.io as pio\n","\n","if IN_COLAB or not DEBUG_MODE:\n"," # Thanks to annoying rendering issues, Plotly graphics will either show up in colab OR Vscode depending on the renderer - this is bad for developing demos! 
Thus creating a debug mode.\n"," pio.renderers.default = \"colab\"\n","else:\n"," pio.renderers.default = \"notebook_connected\""]},{"cell_type":"code","execution_count":3,"metadata":{},"outputs":[{"ename":"ModuleNotFoundError","evalue":"No module named 'torchtyping'","output_type":"error","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)","Cell \u001b[0;32mIn[3], line 15\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mplotly\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mexpress\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mpx\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtorch\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mdata\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m DataLoader\n\u001b[0;32m---> 15\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtorchtyping\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m TensorType \u001b[38;5;28;01mas\u001b[39;00m TT\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtyping\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m List, Union, Optional, Callable\n\u001b[1;32m 17\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mfunctools\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m partial\n","\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'torchtyping'"]}],"source":["# Import stuff\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import torch.optim as optim\n","import numpy as np\n","import einops\n","from fancy_einsum import einsum\n","import tqdm.notebook as tqdm\n","import random\n","from pathlib import Path\n","import plotly.express as px\n","from torch.utils.data import DataLoader\n","\n","from torchtyping import TensorType as TT\n","from typing import List, Union, Optional, Callable\n","from functools import partial\n","import copy\n","import itertools\n","import json\n","\n","from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer\n","import dataclasses\n","import datasets\n","from IPython.display import HTML, Markdown"]},{"cell_type":"code","execution_count":5,"metadata":{},"outputs":[],"source":["import transformer_lens\n","import transformer_lens.utils as utils\n","from transformer_lens.hook_points import (\n"," HookedRootModule,\n"," HookPoint,\n",") # Hooking utilities\n","from transformer_lens import (\n"," HookedTransformer,\n"," HookedTransformerConfig,\n"," FactoredMatrix,\n"," ActivationCache,\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" Plotting helper functions from a janky personal library of plotting utils. 
The library is not documented and I recommend against trying to read it, just use your preferred plotting library if you want to do anything non-obvious:"]},{"cell_type":"code","execution_count":6,"metadata":{},"outputs":[],"source":["from neel_plotly import line, imshow, scatter"]},{"cell_type":"code","execution_count":7,"metadata":{},"outputs":[],"source":["import transformer_lens.patching as patching"]},{"cell_type":"markdown","metadata":{},"source":[" ## IOI Patching Setup\n"," This just copies the relevant set up from Exploratory Analysis Demo, and isn't very important."]},{"cell_type":"code","execution_count":8,"metadata":{},"outputs":[{"name":"stderr","output_type":"stream","text":["Using pad_token, but it is not set yet.\n"]},{"name":"stdout","output_type":"stream","text":["Loaded pretrained model gpt2-small into HookedTransformer\n"]}],"source":["model = HookedTransformer.from_pretrained(\"gpt2-small\")\n","model.set_use_attn_result(True)"]},{"cell_type":"code","execution_count":9,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean string 0 <|endoftext|>When John and Mary went to the shops, John gave the bag to\n","Corrupted string 0 <|endoftext|>When John and Mary went to the shops, Mary gave the bag to\n","Answer token indices tensor([[ 5335, 1757],\n"," [ 1757, 5335],\n"," [ 4186, 3700],\n"," [ 3700, 4186],\n"," [ 6035, 15686],\n"," [15686, 6035],\n"," [ 5780, 14235],\n"," [14235, 5780]], device='cuda:0')\n"]}],"source":["prompts = [\n"," \"When John and Mary went to the shops, John gave the bag to\",\n"," \"When John and Mary went to the shops, Mary gave the bag to\",\n"," \"When Tom and James went to the park, James gave the ball to\",\n"," \"When Tom and James went to the park, Tom gave the ball to\",\n"," \"When Dan and Sid went to the shops, Sid gave an apple to\",\n"," \"When Dan and Sid went to the shops, Dan gave an apple to\",\n"," \"After Martin and Amy went to the park, Amy gave a drink to\",\n"," \"After Martin and Amy went to the park, Martin gave a drink to\",\n","]\n","answers = [\n"," (\" Mary\", \" John\"),\n"," (\" John\", \" Mary\"),\n"," (\" Tom\", \" James\"),\n"," (\" James\", \" Tom\"),\n"," (\" Dan\", \" Sid\"),\n"," (\" Sid\", \" Dan\"),\n"," (\" Martin\", \" Amy\"),\n"," (\" Amy\", \" Martin\"),\n","]\n","\n","clean_tokens = model.to_tokens(prompts)\n","# Swap each adjacent pair, with a hacky list comprehension\n","corrupted_tokens = clean_tokens[\n"," [(i + 1 if i % 2 == 0 else i - 1) for i in range(len(clean_tokens))]\n","]\n","print(\"Clean string 0\", model.to_string(clean_tokens[0]))\n","print(\"Corrupted string 0\", model.to_string(corrupted_tokens[0]))\n","\n","answer_token_indices = torch.tensor(\n"," [\n"," [model.to_single_token(answers[i][j]) for j in range(2)]\n"," for i in range(len(answers))\n"," ],\n"," device=model.cfg.device,\n",")\n","print(\"Answer token indices\", answer_token_indices)"]},{"cell_type":"code","execution_count":10,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean logit diff: 3.5519\n","Corrupted logit diff: -3.5519\n"]}],"source":["def get_logit_diff(logits, answer_token_indices=answer_token_indices):\n"," if len(logits.shape) == 3:\n"," # Get final logits only\n"," logits = logits[:, -1, :]\n"," correct_logits = logits.gather(1, answer_token_indices[:, 0].unsqueeze(1))\n"," incorrect_logits = logits.gather(1, answer_token_indices[:, 1].unsqueeze(1))\n"," return (correct_logits - incorrect_logits).mean()\n","\n","\n","clean_logits, clean_cache = 
model.run_with_cache(clean_tokens)\n","corrupted_logits, corrupted_cache = model.run_with_cache(corrupted_tokens)\n","\n","clean_logit_diff = get_logit_diff(clean_logits, answer_token_indices).item()\n","print(f\"Clean logit diff: {clean_logit_diff:.4f}\")\n","\n","corrupted_logit_diff = get_logit_diff(corrupted_logits, answer_token_indices).item()\n","print(f\"Corrupted logit diff: {corrupted_logit_diff:.4f}\")"]},{"cell_type":"code","execution_count":11,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean Baseline is 1: 1.0000\n","Corrupted Baseline is 0: 0.0000\n"]}],"source":["CLEAN_BASELINE = clean_logit_diff\n","CORRUPTED_BASELINE = corrupted_logit_diff\n","\n","\n","def ioi_metric(logits, answer_token_indices=answer_token_indices):\n"," return (get_logit_diff(logits, answer_token_indices) - CORRUPTED_BASELINE) / (\n"," CLEAN_BASELINE - CORRUPTED_BASELINE\n"," )\n","\n","\n","print(f\"Clean Baseline is 1: {ioi_metric(clean_logits).item():.4f}\")\n","print(f\"Corrupted Baseline is 0: {ioi_metric(corrupted_logits).item():.4f}\")"]},{"cell_type":"markdown","metadata":{},"source":[" ## Patching\n"," In the following cells, we define attribution patching and use it in various ways on the model."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["Metric = Callable[[TT[\"batch_and_pos_dims\", \"d_model\"]], float]"]},{"cell_type":"code","execution_count":13,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean Value: 1.0\n","Clean Activations Cached: 220\n","Clean Gradients Cached: 220\n","Corrupted Value: 0.0\n","Corrupted Activations Cached: 220\n","Corrupted Gradients Cached: 220\n"]}],"source":["filter_not_qkv_input = lambda name: \"_input\" not in name\n","\n","\n","def get_cache_fwd_and_bwd(model, tokens, metric):\n"," model.reset_hooks()\n"," cache = {}\n","\n"," def forward_cache_hook(act, hook):\n"," cache[hook.name] = act.detach()\n","\n"," model.add_hook(filter_not_qkv_input, forward_cache_hook, \"fwd\")\n","\n"," grad_cache = {}\n","\n"," def backward_cache_hook(act, hook):\n"," grad_cache[hook.name] = act.detach()\n","\n"," model.add_hook(filter_not_qkv_input, backward_cache_hook, \"bwd\")\n","\n"," value = metric(model(tokens))\n"," value.backward()\n"," model.reset_hooks()\n"," return (\n"," value.item(),\n"," ActivationCache(cache, model),\n"," ActivationCache(grad_cache, model),\n"," )\n","\n","\n","clean_value, clean_cache, clean_grad_cache = get_cache_fwd_and_bwd(\n"," model, clean_tokens, ioi_metric\n",")\n","print(\"Clean Value:\", clean_value)\n","print(\"Clean Activations Cached:\", len(clean_cache))\n","print(\"Clean Gradients Cached:\", len(clean_grad_cache))\n","corrupted_value, corrupted_cache, corrupted_grad_cache = get_cache_fwd_and_bwd(\n"," model, corrupted_tokens, ioi_metric\n",")\n","print(\"Corrupted Value:\", corrupted_value)\n","print(\"Corrupted Activations Cached:\", len(corrupted_cache))\n","print(\"Corrupted Gradients Cached:\", len(corrupted_grad_cache))"]},{"cell_type":"markdown","metadata":{},"source":[" ### Attention Attribution\n"," The easiest thing to start with is to not even engage with the corrupted tokens/patching, but to look at the attribution of the attention patterns - that is, the linear approximation to what happens if you set each element of the attention pattern to zero. This, as it turns out, is a good proxy to what is going on with each head!\n"," Note that this is *not* the same as what we will later do with patching. 
In particular, this does not set up a careful counterfactual! It's a good tool for what's generally going on in this problem, but does not control for eg stuff that systematically boosts John > Mary in general, stuff that says \"I should activate the IOI circuit\", etc. Though using logit diff as our metric *does*\n"," Each element of the batch is independent and the metric is an average logit diff, so we can analyse each batch element independently here. We'll look at the first one, and then at the average across the whole batch (note - 4 prompts have indirect object before subject, 4 prompts have it the other way round, making the average pattern harder to interpret - I plot it over the first sequence of tokens as a mildly misleading reference).\n"," We can compare it to the interpretability in the wild diagram, and basically instantly recover most of the circuit!"]},{"cell_type":"code","execution_count":14,"metadata":{},"outputs":[],"source":["def create_attention_attr(\n"," clean_cache, clean_grad_cache\n",") -> TT[\"batch\", \"layer\", \"head_index\", \"dest\", \"src\"]:\n"," attention_stack = torch.stack(\n"," [clean_cache[\"pattern\", l] for l in range(model.cfg.n_layers)], dim=0\n"," )\n"," attention_grad_stack = torch.stack(\n"," [clean_grad_cache[\"pattern\", l] for l in range(model.cfg.n_layers)], dim=0\n"," )\n"," attention_attr = attention_grad_stack * attention_stack\n"," attention_attr = einops.rearrange(\n"," attention_attr,\n"," \"layer batch head_index dest src -> batch layer head_index dest src\",\n"," )\n"," return attention_attr\n","\n","\n","attention_attr = create_attention_attr(clean_cache, clean_grad_cache)"]},{"cell_type":"code","execution_count":15,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["['L0H0', 'L0H1', 'L0H2', 'L0H3', 'L0H4']\n","['L0H0+', 'L0H0-', 'L0H1+', 'L0H1-', 'L0H2+']\n","['L0H0Q', 'L0H0K', 'L0H0V', 'L0H1Q', 'L0H1K']\n"]}],"source":["HEAD_NAMES = [\n"," f\"L{l}H{h}\" for l in range(model.cfg.n_layers) for h in range(model.cfg.n_heads)\n","]\n","HEAD_NAMES_SIGNED = [f\"{name}{sign}\" for name in HEAD_NAMES for sign in [\"+\", \"-\"]]\n","HEAD_NAMES_QKV = [\n"," f\"{name}{act_name}\" for name in HEAD_NAMES for act_name in [\"Q\", \"K\", \"V\"]\n","]\n","print(HEAD_NAMES[:5])\n","print(HEAD_NAMES_SIGNED[:5])\n","print(HEAD_NAMES_QKV[:5])"]},{"cell_type":"markdown","metadata":{},"source":[" An extremely janky way to plot the attention attribution patterns. We scale them to be in [-1, 1], split each head into a positive and negative part (so all of it is in [0, 1]), and then plot the top 20 head-halves (a head can appear twice!) by the max value of the attribution pattern."]},{"cell_type":"code","execution_count":16,"metadata":{},"outputs":[{"data":{"text/markdown":["### Attention Attribution for first sequence"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n"," \n","\n"," \n","
\n"," \n"," \n"," "],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/markdown":["### Summed Attention Attribution for all sequences"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n"," \n","\n"," \n","
\n"," \n"," \n"," "],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Note: Plotted over first sequence for reference, but pairs have IO and S1 in different positions.\n"]}],"source":["def plot_attention_attr(attention_attr, tokens, top_k=20, index=0, title=\"\"):\n"," if len(tokens.shape) == 2:\n"," tokens = tokens[index]\n"," if len(attention_attr.shape) == 5:\n"," attention_attr = attention_attr[index]\n"," attention_attr_pos = attention_attr.clamp(min=-1e-5)\n"," attention_attr_neg = -attention_attr.clamp(max=1e-5)\n"," attention_attr_signed = torch.stack([attention_attr_pos, attention_attr_neg], dim=0)\n"," attention_attr_signed = einops.rearrange(\n"," attention_attr_signed,\n"," \"sign layer head_index dest src -> (layer head_index sign) dest src\",\n"," )\n"," attention_attr_signed = attention_attr_signed / attention_attr_signed.max()\n"," attention_attr_indices = (\n"," attention_attr_signed.max(-1).values.max(-1).values.argsort(descending=True)\n"," )\n"," # print(attention_attr_indices.shape)\n"," # print(attention_attr_indices)\n"," attention_attr_signed = attention_attr_signed[attention_attr_indices, :, :]\n"," head_labels = [HEAD_NAMES_SIGNED[i.item()] for i in attention_attr_indices]\n","\n"," if title:\n"," display(Markdown(\"### \" + title))\n"," display(\n"," pysvelte.AttentionMulti(\n"," tokens=model.to_str_tokens(tokens),\n"," attention=attention_attr_signed.permute(1, 2, 0)[:, :, :top_k],\n"," head_labels=head_labels[:top_k],\n"," )\n"," )\n","\n","\n","plot_attention_attr(\n"," attention_attr,\n"," clean_tokens,\n"," index=0,\n"," title=\"Attention Attribution for first sequence\",\n",")\n","\n","plot_attention_attr(\n"," attention_attr.sum(0),\n"," clean_tokens[0],\n"," title=\"Summed Attention Attribution for all sequences\",\n",")\n","print(\n"," \"Note: Plotted over first sequence for reference, but pairs have IO and S1 in different positions.\"\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" ## Attribution Patching\n"," In the following sections, I will implement various kinds of attribution patching, and then compare them to the activation patching patterns (activation patching code copied from [Exploratory Analysis Demo](https://neelnanda.io/exploratory-analysis-demo))\n"," ### Residual Stream Patching\n","
Note: We add up across both d_model and batch (Explanation).\n"," We add up along d_model because we're taking the dot product - the derivative *is* the linear map that locally linearly approximates the metric, and so we take the dot product of our change vector with the derivative vector. Equivalently, we look at the effect of changing each coordinate independently, and then combine them by adding them up - it's linear, so this totally works.\n"," We add up across batch because we're taking the average of the metric, so each individual batch element provides `1/batch_size` of the overall effect. Because each batch element is independent of the others and no information moves between activations for different inputs, the batched version is equivalent to doing attribution patching separately for each input, and then averaging - in this second version the metric per input is *not* divided by batch_size because we don't average.
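Here is a minimal, self-contained sketch of that equivalence claim (not part of the notebook; the linear toy metric, `W`, and all tensor names are invented for illustration). It checks that summing `grad * (clean - corrupted)` over d_model and batch gives the same numbers as computing per-example attributions against the un-averaged metric and then averaging over the batch:

```python
import torch

torch.manual_seed(0)
batch_size, pos, d_model = 4, 3, 8

clean_act = torch.randn(batch_size, pos, d_model)
corrupted_act = torch.randn(batch_size, pos, d_model, requires_grad=True)
W = torch.randn(d_model)  # stand-in for "the rest of the model": a fixed linear readout

# Toy metric: mean over the batch of a per-example scalar, mirroring how
# ioi_metric averages the normalised logit diff over prompts.
metric = (corrupted_act @ W).sum(-1).mean()
metric.backward()
grad = corrupted_act.grad  # d(metric)/d(activation), shape [batch, pos, d_model]

delta = clean_act - corrupted_act.detach()

# Batched attribution: sum grad * (clean - corrupted) over batch AND d_model.
batched_attr = (grad * delta).sum(dim=(0, -1))  # shape [pos]

# Per-example attribution against the un-averaged metric (whose gradient is
# batch_size * grad), then averaged over examples - should match the batched version.
per_example_attr = (batch_size * grad * delta).sum(-1)  # shape [batch, pos]
assert torch.allclose(batched_attr, per_example_attr.mean(0), atol=1e-5)
```

The toy metric here is exactly linear, so the two quantities agree up to floating-point error; for a real model the same bookkeeping applies to the local linear approximation.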
"]},{"cell_type":"code","execution_count":17,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def attr_patch_residual(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"component\", \"pos\"]:\n"," clean_residual, residual_labels = clean_cache.accumulated_resid(\n"," -1, incl_mid=True, return_labels=True\n"," )\n"," corrupted_residual = corrupted_cache.accumulated_resid(\n"," -1, incl_mid=True, return_labels=False\n"," )\n"," corrupted_grad_residual = corrupted_grad_cache.accumulated_resid(\n"," -1, incl_mid=True, return_labels=False\n"," )\n"," residual_attr = einops.reduce(\n"," corrupted_grad_residual * (clean_residual - corrupted_residual),\n"," \"component batch pos d_model -> component pos\",\n"," \"sum\",\n"," )\n"," return residual_attr, residual_labels\n","\n","\n","residual_attr, residual_labels = attr_patch_residual(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","imshow(\n"," residual_attr,\n"," y=residual_labels,\n"," yaxis=\"Component\",\n"," xaxis=\"Position\",\n"," title=\"Residual Attribution Patching\",\n",")\n","\n","# ### Layer Output Patching"]},{"cell_type":"code","execution_count":18,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def attr_patch_layer_out(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"component\", \"pos\"]:\n"," clean_layer_out, labels = clean_cache.decompose_resid(-1, return_labels=True)\n"," corrupted_layer_out = corrupted_cache.decompose_resid(-1, return_labels=False)\n"," corrupted_grad_layer_out = corrupted_grad_cache.decompose_resid(\n"," -1, return_labels=False\n"," )\n"," layer_out_attr = einops.reduce(\n"," corrupted_grad_layer_out * (clean_layer_out - corrupted_layer_out),\n"," \"component batch pos d_model -> component pos\",\n"," \"sum\",\n"," )\n"," return layer_out_attr, labels\n","\n","\n","layer_out_attr, layer_out_labels = attr_patch_layer_out(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","imshow(\n"," layer_out_attr,\n"," y=layer_out_labels,\n"," yaxis=\"Component\",\n"," xaxis=\"Position\",\n"," title=\"Layer Output Attribution Patching\",\n",")"]},{"cell_type":"code","execution_count":19,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def attr_patch_head_out(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"component\", \"pos\"]:\n"," labels = HEAD_NAMES\n","\n"," clean_head_out = clean_cache.stack_head_results(-1, return_labels=False)\n"," corrupted_head_out = corrupted_cache.stack_head_results(-1, return_labels=False)\n"," corrupted_grad_head_out = corrupted_grad_cache.stack_head_results(\n"," -1, return_labels=False\n"," )\n"," head_out_attr = einops.reduce(\n"," corrupted_grad_head_out * (clean_head_out - corrupted_head_out),\n"," \"component batch pos d_model -> component pos\",\n"," \"sum\",\n"," )\n"," return head_out_attr, labels\n","\n","\n","head_out_attr, head_out_labels = attr_patch_head_out(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","imshow(\n"," head_out_attr,\n"," y=head_out_labels,\n"," yaxis=\"Component\",\n"," xaxis=\"Position\",\n"," title=\"Head Output Attribution Patching\",\n",")\n","sum_head_out_attr = einops.reduce(\n"," head_out_attr,\n"," \"(layer head) pos -> layer head\",\n"," \"sum\",\n"," layer=model.cfg.n_layers,\n"," head=model.cfg.n_heads,\n",")\n","imshow(\n"," sum_head_out_attr,\n"," yaxis=\"Layer\",\n"," xaxis=\"Head Index\",\n"," title=\"Head Output Attribution Patching Sum Over Pos\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" ### Head Activation Patching\n"," Intuitively, a head has three inputs, keys, queries and values. We can patch each of these individually to get a sense for where the important part of each head's input comes from!\n"," As a sanity check, we also do this for the mixed value. The result is a linear map of this (`z @ W_O == result`), so this is the same as patching the output of the head.\n"," We plot both the patch for each head over each position, and summed over position (it tends to be pretty sparse, so the latter is the same)"]},{"cell_type":"code","execution_count":20,"metadata":{},"outputs":[{"data":{"text/markdown":["#### Key Head Vector Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/markdown":["#### Query Head Vector Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/markdown":["#### Value Head Vector Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/markdown":["#### Mixed Value Head Vector Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["from typing_extensions import Literal\n","\n","\n","def stack_head_vector_from_cache(\n"," cache, activation_name: Literal[\"q\", \"k\", \"v\", \"z\"]\n",") -> TT[\"layer_and_head_index\", \"batch\", \"pos\", \"d_head\"]:\n"," \"\"\"Stacks the head vectors from the cache from a specific activation (key, query, value or mixed_value (z)) into a single tensor.\"\"\"\n"," stacked_head_vectors = torch.stack(\n"," [cache[activation_name, l] for l in range(model.cfg.n_layers)], dim=0\n"," )\n"," stacked_head_vectors = einops.rearrange(\n"," stacked_head_vectors,\n"," \"layer batch pos head_index d_head -> (layer head_index) batch pos d_head\",\n"," )\n"," return stacked_head_vectors\n","\n","\n","def attr_patch_head_vector(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n"," activation_name: Literal[\"q\", \"k\", \"v\", \"z\"],\n",") -> TT[\"component\", \"pos\"]:\n"," labels = HEAD_NAMES\n","\n"," clean_head_vector = stack_head_vector_from_cache(clean_cache, activation_name)\n"," corrupted_head_vector = stack_head_vector_from_cache(\n"," corrupted_cache, activation_name\n"," )\n"," corrupted_grad_head_vector = stack_head_vector_from_cache(\n"," corrupted_grad_cache, activation_name\n"," )\n"," head_vector_attr = einops.reduce(\n"," corrupted_grad_head_vector * (clean_head_vector - corrupted_head_vector),\n"," \"component batch pos d_head -> component pos\",\n"," \"sum\",\n"," )\n"," return head_vector_attr, labels\n","\n","\n","head_vector_attr_dict = {}\n","for activation_name, activation_name_full in [\n"," (\"k\", \"Key\"),\n"," (\"q\", \"Query\"),\n"," (\"v\", \"Value\"),\n"," (\"z\", \"Mixed Value\"),\n","]:\n"," display(Markdown(f\"#### {activation_name_full} Head Vector Attribution Patching\"))\n"," head_vector_attr_dict[activation_name], head_vector_labels = attr_patch_head_vector(\n"," clean_cache, corrupted_cache, corrupted_grad_cache, activation_name\n"," )\n"," imshow(\n"," head_vector_attr_dict[activation_name],\n"," y=head_vector_labels,\n"," yaxis=\"Component\",\n"," xaxis=\"Position\",\n"," title=f\"{activation_name_full} Attribution Patching\",\n"," )\n"," sum_head_vector_attr = einops.reduce(\n"," head_vector_attr_dict[activation_name],\n"," \"(layer head) pos -> layer head\",\n"," \"sum\",\n"," layer=model.cfg.n_layers,\n"," head=model.cfg.n_heads,\n"," )\n"," imshow(\n"," sum_head_vector_attr,\n"," yaxis=\"Layer\",\n"," xaxis=\"Head Index\",\n"," title=f\"{activation_name_full} Attribution Patching Sum Over Pos\",\n"," )"]},{"cell_type":"code","execution_count":21,"metadata":{},"outputs":[{"data":{"text/markdown":["### Head Pattern Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n"," \n","\n"," \n","
\n"," \n"," \n"," "],"text/plain":[""]},"metadata":{},"output_type":"display_data"}],"source":["from typing_extensions import Literal\n","\n","\n","def stack_head_pattern_from_cache(\n"," cache,\n",") -> TT[\"layer_and_head_index\", \"batch\", \"dest_pos\", \"src_pos\"]:\n"," \"\"\"Stacks the head patterns from the cache into a single tensor.\"\"\"\n"," stacked_head_pattern = torch.stack(\n"," [cache[\"pattern\", l] for l in range(model.cfg.n_layers)], dim=0\n"," )\n"," stacked_head_pattern = einops.rearrange(\n"," stacked_head_pattern,\n"," \"layer batch head_index dest_pos src_pos -> (layer head_index) batch dest_pos src_pos\",\n"," )\n"," return stacked_head_pattern\n","\n","\n","def attr_patch_head_pattern(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"component\", \"dest_pos\", \"src_pos\"]:\n"," labels = HEAD_NAMES\n","\n"," clean_head_pattern = stack_head_pattern_from_cache(clean_cache)\n"," corrupted_head_pattern = stack_head_pattern_from_cache(corrupted_cache)\n"," corrupted_grad_head_pattern = stack_head_pattern_from_cache(corrupted_grad_cache)\n"," head_pattern_attr = einops.reduce(\n"," corrupted_grad_head_pattern * (clean_head_pattern - corrupted_head_pattern),\n"," \"component batch dest_pos src_pos -> component dest_pos src_pos\",\n"," \"sum\",\n"," )\n"," return head_pattern_attr, labels\n","\n","\n","head_pattern_attr, labels = attr_patch_head_pattern(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","\n","plot_attention_attr(\n"," einops.rearrange(\n"," head_pattern_attr,\n"," \"(layer head) dest src -> layer head dest src\",\n"," layer=model.cfg.n_layers,\n"," head=model.cfg.n_heads,\n"," ),\n"," clean_tokens,\n"," index=0,\n"," title=\"Head Pattern Attribution Patching\",\n",")"]},{"cell_type":"code","execution_count":22,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def get_head_vector_grad_input_from_grad_cache(\n"," grad_cache: ActivationCache, activation_name: Literal[\"q\", \"k\", \"v\"], layer: int\n",") -> TT[\"batch\", \"pos\", \"head_index\", \"d_model\"]:\n"," vector_grad = grad_cache[activation_name, layer]\n"," ln_scales = grad_cache[\"scale\", layer, \"ln1\"]\n"," attn_layer_object = model.blocks[layer].attn\n"," if activation_name == \"q\":\n"," W = attn_layer_object.W_Q\n"," elif activation_name == \"k\":\n"," W = attn_layer_object.W_K\n"," elif activation_name == \"v\":\n"," W = attn_layer_object.W_V\n"," else:\n"," raise ValueError(\"Invalid activation name\")\n","\n"," return einsum(\n"," \"batch pos head_index d_head, batch pos, head_index d_model d_head -> batch pos head_index d_model\",\n"," vector_grad,\n"," ln_scales.squeeze(-1),\n"," W,\n"," )\n","\n","\n","def get_stacked_head_vector_grad_input(\n"," grad_cache, activation_name: Literal[\"q\", \"k\", \"v\"]\n",") -> TT[\"layer\", \"batch\", \"pos\", \"head_index\", \"d_model\"]:\n"," return torch.stack(\n"," [\n"," get_head_vector_grad_input_from_grad_cache(grad_cache, activation_name, l)\n"," for l in range(model.cfg.n_layers)\n"," ],\n"," dim=0,\n"," )\n","\n","\n","def get_full_vector_grad_input(\n"," grad_cache,\n",") -> TT[\"qkv\", \"layer\", \"batch\", \"pos\", \"head_index\", \"d_model\"]:\n"," return torch.stack(\n"," [\n"," get_stacked_head_vector_grad_input(grad_cache, activation_name)\n"," for activation_name in [\"q\", \"k\", \"v\"]\n"," ],\n"," dim=0,\n"," )\n","\n","\n","def attr_patch_head_path(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"qkv\", \"dest_component\", \"src_component\", \"pos\"]:\n"," \"\"\"\n"," Computes the attribution patch along the path between each pair of heads.\n","\n"," Sets this to zero for the path from any late head to any early head\n","\n"," \"\"\"\n"," start_labels = HEAD_NAMES\n"," end_labels = HEAD_NAMES_QKV\n"," full_vector_grad_input = get_full_vector_grad_input(corrupted_grad_cache)\n"," clean_head_result_stack = clean_cache.stack_head_results(-1)\n"," corrupted_head_result_stack = corrupted_cache.stack_head_results(-1)\n"," diff_head_result = einops.rearrange(\n"," clean_head_result_stack - corrupted_head_result_stack,\n"," \"(layer head_index) batch pos d_model -> layer batch pos head_index d_model\",\n"," layer=model.cfg.n_layers,\n"," head_index=model.cfg.n_heads,\n"," )\n"," path_attr = einsum(\n"," \"qkv layer_end batch pos head_end d_model, layer_start batch pos head_start d_model -> qkv layer_end head_end layer_start head_start pos\",\n"," full_vector_grad_input,\n"," diff_head_result,\n"," )\n"," correct_layer_order_mask = (\n"," torch.arange(model.cfg.n_layers)[None, :, None, None, None, None]\n"," > torch.arange(model.cfg.n_layers)[None, None, None, :, None, None]\n"," ).to(path_attr.device)\n"," zero = torch.zeros(1, device=path_attr.device)\n"," path_attr = torch.where(correct_layer_order_mask, path_attr, zero)\n","\n"," path_attr = einops.rearrange(\n"," path_attr,\n"," \"qkv layer_end head_end layer_start head_start pos -> (layer_end head_end qkv) (layer_start head_start) pos\",\n"," )\n"," return path_attr, end_labels, start_labels\n","\n","\n","head_path_attr, end_labels, start_labels = attr_patch_head_path(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","imshow(\n"," head_path_attr.sum(-1),\n"," y=end_labels,\n"," yaxis=\"Path End (Head 
Input)\",\n"," x=start_labels,\n"," xaxis=\"Path Start (Head Output)\",\n"," title=\"Head Path Attribution Patching\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" This is hard to parse. Here's an experiment with filtering for the most important heads and showing their paths."]},{"cell_type":"code","execution_count":23,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["head_out_values, head_out_indices = head_out_attr.sum(-1).abs().sort(descending=True)\n","line(head_out_values)\n","top_head_indices = head_out_indices[:22].sort().values\n","top_end_indices = []\n","top_end_labels = []\n","top_start_indices = []\n","top_start_labels = []\n","for i in top_head_indices:\n"," i = i.item()\n"," top_start_indices.append(i)\n"," top_start_labels.append(start_labels[i])\n"," for j in range(3):\n"," top_end_indices.append(3 * i + j)\n"," top_end_labels.append(end_labels[3 * i + j])\n","\n","imshow(\n"," head_path_attr[top_end_indices, :][:, top_start_indices].sum(-1),\n"," y=top_end_labels,\n"," yaxis=\"Path End (Head Input)\",\n"," x=top_start_labels,\n"," xaxis=\"Path Start (Head Output)\",\n"," title=\"Head Path Attribution Patching (Filtered for Top Heads)\",\n",")"]},{"cell_type":"code","execution_count":24,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["for j, composition_type in enumerate([\"Query\", \"Key\", \"Value\"]):\n"," imshow(\n"," head_path_attr[top_end_indices, :][:, top_start_indices][j::3].sum(-1),\n"," y=top_end_labels[j::3],\n"," yaxis=\"Path End (Head Input)\",\n"," x=top_start_labels,\n"," xaxis=\"Path Start (Head Output)\",\n"," title=f\"Head Path to {composition_type} Attribution Patching (Filtered for Top Heads)\",\n"," )"]},{"cell_type":"code","execution_count":25,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["top_head_path_attr = einops.rearrange(\n"," head_path_attr[top_end_indices, :][:, top_start_indices].sum(-1),\n"," \"(head_end qkv) head_start -> qkv head_end head_start\",\n"," qkv=3,\n",")\n","imshow(\n"," top_head_path_attr,\n"," y=[i[:-1] for i in top_end_labels[::3]],\n"," yaxis=\"Path End (Head Input)\",\n"," x=top_start_labels,\n"," xaxis=\"Path Start (Head Output)\",\n"," title=f\"Head Path Attribution Patching (Filtered for Top Heads)\",\n"," facet_col=0,\n"," facet_labels=[\"Query\", \"Key\", \"Value\"],\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" Let's now dive into 3 interesting heads: L5H5 (induction head), L8H6 (S-Inhibition Head), L9H9 (Name Mover) and look at their input and output paths (note - Q input means )"]},{"cell_type":"code","execution_count":26,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["interesting_heads = [\n"," 5 * model.cfg.n_heads + 5,\n"," 8 * model.cfg.n_heads + 6,\n"," 9 * model.cfg.n_heads + 9,\n","]\n","interesting_head_labels = [HEAD_NAMES[i] for i in interesting_heads]\n","for head_index, label in zip(interesting_heads, interesting_head_labels):\n"," in_paths = head_path_attr[3 * head_index : 3 * head_index + 3].sum(-1)\n"," out_paths = head_path_attr[:, head_index].sum(-1)\n"," out_paths = einops.rearrange(out_paths, \"(layer_head qkv) -> qkv layer_head\", qkv=3)\n"," all_paths = torch.cat([in_paths, out_paths], dim=0)\n"," all_paths = einops.rearrange(\n"," all_paths,\n"," \"path_type (layer head) -> path_type layer head\",\n"," layer=model.cfg.n_layers,\n"," head=model.cfg.n_heads,\n"," )\n"," imshow(\n"," all_paths,\n"," facet_col=0,\n"," facet_labels=[\n"," \"Query (In)\",\n"," \"Key (In)\",\n"," \"Value (In)\",\n"," \"Query (Out)\",\n"," \"Key (Out)\",\n"," \"Value (Out)\",\n"," ],\n"," title=f\"Input and Output Paths for head {label}\",\n"," yaxis=\"Layer\",\n"," xaxis=\"Head\",\n"," )"]},{"cell_type":"markdown","metadata":{},"source":[" ## Validating Attribution vs Activation Patching\n"," Let's now compare attribution and activation patching. Generally it's a decent approximation! The main place it fails is MLP0 and the residual stream\n"," My fuzzy intuition is that attribution patching works badly for \"big\" things which are poorly modelled as linear approximations, and works well for \"small\" things which are more like incremental changes. Anything involving replacing the embedding is a \"big\" thing, which includes residual streams, and in GPT-2 small MLP0 seems to be used as an \"extended embedding\" (where later layers use MLP0's output instead of the token embedding), so I also count it as big.\n"," See more discussion in the accompanying blog post!\n"]},{"cell_type":"markdown","metadata":{},"source":[" First do some refactoring to make attribution patching more generic. We make an attribution cache, which is an ActivationCache where each element is (clean_act - corrupted_act) * corrupted_grad, so that it's the per-element attribution for each activation. Thanks to linearity, we just compute things by adding stuff up along the relevant dimensions!"]},{"cell_type":"code","execution_count":27,"metadata":{},"outputs":[],"source":["attribution_cache_dict = {}\n","for key in corrupted_grad_cache.cache_dict.keys():\n"," attribution_cache_dict[key] = corrupted_grad_cache.cache_dict[key] * (\n"," clean_cache.cache_dict[key] - corrupted_cache.cache_dict[key]\n"," )\n","attr_cache = ActivationCache(attribution_cache_dict, model)"]},{"cell_type":"markdown","metadata":{},"source":[" By block: For each head we patch the starting residual stream, attention output + MLP output"]},{"cell_type":"code","execution_count":28,"metadata":{},"outputs":[],"source":["str_tokens = model.to_str_tokens(clean_tokens[0])\n","context_length = len(str_tokens)"]},{"cell_type":"code","execution_count":29,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"95a5290e11b64b6a95ef5dd37d027c7a","version_major":2,"version_minor":0},"text/plain":[" 0%| | 0/180 [00:00\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["every_block_act_patch_result = patching.get_act_patch_block_every(\n"," model, corrupted_tokens, clean_cache, ioi_metric\n",")\n","imshow(\n"," every_block_act_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Residual Stream\", \"Attn Output\", \"MLP Output\"],\n"," title=\"Activation Patching Per Block\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer\",\n"," zmax=1,\n"," zmin=-1,\n"," x=[f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))],\n",")"]},{"cell_type":"code","execution_count":30,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def get_attr_patch_block_every(attr_cache):\n"," resid_pre_attr = einops.reduce(\n"," attr_cache.stack_activation(\"resid_pre\"),\n"," \"layer batch pos d_model -> layer pos\",\n"," \"sum\",\n"," )\n"," attn_out_attr = einops.reduce(\n"," attr_cache.stack_activation(\"attn_out\"),\n"," \"layer batch pos d_model -> layer pos\",\n"," \"sum\",\n"," )\n"," mlp_out_attr = einops.reduce(\n"," attr_cache.stack_activation(\"mlp_out\"),\n"," \"layer batch pos d_model -> layer pos\",\n"," \"sum\",\n"," )\n","\n"," every_block_attr_patch_result = torch.stack(\n"," [resid_pre_attr, attn_out_attr, mlp_out_attr], dim=0\n"," )\n"," return every_block_attr_patch_result\n","\n","\n","every_block_attr_patch_result = get_attr_patch_block_every(attr_cache)\n","imshow(\n"," every_block_attr_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Residual Stream\", \"Attn Output\", \"MLP Output\"],\n"," title=\"Attribution Patching Per Block\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer\",\n"," zmax=1,\n"," zmin=-1,\n"," x=[f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))],\n",")"]},{"cell_type":"code","execution_count":31,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["scatter(\n"," y=every_block_attr_patch_result.reshape(3, -1),\n"," x=every_block_act_patch_result.reshape(3, -1),\n"," facet_col=0,\n"," facet_labels=[\"Residual Stream\", \"Attn Output\", \"MLP Output\"],\n"," title=\"Attribution vs Activation Patching Per Block\",\n"," xaxis=\"Activation Patch\",\n"," yaxis=\"Attribution Patch\",\n"," hover=[\n"," f\"Layer {l}, Position {p}, |{str_tokens[p]}|\"\n"," for l in range(model.cfg.n_layers)\n"," for p in range(context_length)\n"," ],\n"," color=einops.repeat(\n"," torch.arange(model.cfg.n_layers), \"layer -> (layer pos)\", pos=context_length\n"," ),\n"," color_continuous_scale=\"Portland\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" By head: For each head we patch the output, query, key, value or pattern. We do all positions at once so it's not super slow."]},{"cell_type":"code","execution_count":32,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"18b2e6b0985b40cd8c0cd1a16ba62975","version_major":2,"version_minor":0},"text/plain":[" 0%| | 0/144 [00:00\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["every_head_all_pos_act_patch_result = patching.get_act_patch_attn_head_all_pos_every(\n"," model, corrupted_tokens, clean_cache, ioi_metric\n",")\n","imshow(\n"," every_head_all_pos_act_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Activation Patching Per Head (All Pos)\",\n"," xaxis=\"Head\",\n"," yaxis=\"Layer\",\n"," zmax=1,\n"," zmin=-1,\n",")"]},{"cell_type":"code","execution_count":33,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def get_attr_patch_attn_head_all_pos_every(attr_cache):\n"," head_out_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"z\"),\n"," \"layer batch pos head_index d_head -> layer head_index\",\n"," \"sum\",\n"," )\n"," head_q_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"q\"),\n"," \"layer batch pos head_index d_head -> layer head_index\",\n"," \"sum\",\n"," )\n"," head_k_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"k\"),\n"," \"layer batch pos head_index d_head -> layer head_index\",\n"," \"sum\",\n"," )\n"," head_v_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"v\"),\n"," \"layer batch pos head_index d_head -> layer head_index\",\n"," \"sum\",\n"," )\n"," head_pattern_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"pattern\"),\n"," \"layer batch head_index dest_pos src_pos -> layer head_index\",\n"," \"sum\",\n"," )\n","\n"," return torch.stack(\n"," [\n"," head_out_all_pos_attr,\n"," head_q_all_pos_attr,\n"," head_k_all_pos_attr,\n"," head_v_all_pos_attr,\n"," head_pattern_all_pos_attr,\n"," ]\n"," )\n","\n","\n","every_head_all_pos_attr_patch_result = get_attr_patch_attn_head_all_pos_every(\n"," attr_cache\n",")\n","imshow(\n"," every_head_all_pos_attr_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Attribution Patching Per Head (All Pos)\",\n"," xaxis=\"Head\",\n"," yaxis=\"Layer\",\n"," zmax=1,\n"," zmin=-1,\n",")"]},{"cell_type":"code","execution_count":34,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["scatter(\n"," y=every_head_all_pos_attr_patch_result.reshape(5, -1),\n"," x=every_head_all_pos_act_patch_result.reshape(5, -1),\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Attribution vs Activation Patching Per Head (All Pos)\",\n"," xaxis=\"Activation Patch\",\n"," yaxis=\"Attribution Patch\",\n"," include_diag=True,\n"," hover=head_out_labels,\n"," color=einops.repeat(\n"," torch.arange(model.cfg.n_layers),\n"," \"layer -> (layer head)\",\n"," head=model.cfg.n_heads,\n"," ),\n"," color_continuous_scale=\"Portland\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" We see pretty good results in general, but significant errors for heads L5H5 on query and moderate errors for head L10H7 on query and key, and moderate errors for head L11H10 on key. But each of these is fine for pattern and output. My guess is that the problem is that these have pretty saturated attention on a single token, and the linear approximation is thus not great on the attention calculation here, but I'm not sure. When we plot the attention patterns, we do see this!\n"," Note that the axis labels are for the *first* prompt's tokens, but each facet is a different prompt, so this is somewhat inaccurate. In particular, every odd facet has indirect object and subject in the opposite order (IO first). But otherwise everything lines up between the prompts"]},{"cell_type":"code","execution_count":35,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["graph_tok_labels = [\n"," f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))\n","]\n","imshow(\n"," clean_cache[\"pattern\", 5][:, 5],\n"," x=graph_tok_labels,\n"," y=graph_tok_labels,\n"," facet_col=0,\n"," title=\"Attention for Head L5H5\",\n"," facet_name=\"Prompt\",\n",")\n","imshow(\n"," clean_cache[\"pattern\", 10][:, 7],\n"," x=graph_tok_labels,\n"," y=graph_tok_labels,\n"," facet_col=0,\n"," title=\"Attention for Head L10H7\",\n"," facet_name=\"Prompt\",\n",")\n","imshow(\n"," clean_cache[\"pattern\", 11][:, 10],\n"," x=graph_tok_labels,\n"," y=graph_tok_labels,\n"," facet_col=0,\n"," title=\"Attention for Head L11H10\",\n"," facet_name=\"Prompt\",\n",")\n","\n","\n","# [markdown]"]},{"cell_type":"code","execution_count":36,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"06f39489001845849fbc7446a07066f4","version_major":2,"version_minor":0},"text/plain":[" 0%| | 0/2160 [00:00\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["every_head_by_pos_act_patch_result = patching.get_act_patch_attn_head_by_pos_every(\n"," model, corrupted_tokens, clean_cache, ioi_metric\n",")\n","every_head_by_pos_act_patch_result = einops.rearrange(\n"," every_head_by_pos_act_patch_result,\n"," \"act_type layer pos head -> act_type (layer head) pos\",\n",")\n","imshow(\n"," every_head_by_pos_act_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Activation Patching Per Head (By Pos)\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer & Head\",\n"," zmax=1,\n"," zmin=-1,\n"," x=[f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))],\n"," y=head_out_labels,\n",")"]},{"cell_type":"code","execution_count":37,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def get_attr_patch_attn_head_by_pos_every(attr_cache):\n"," head_out_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"z\"),\n"," \"layer batch pos head_index d_head -> layer pos head_index\",\n"," \"sum\",\n"," )\n"," head_q_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"q\"),\n"," \"layer batch pos head_index d_head -> layer pos head_index\",\n"," \"sum\",\n"," )\n"," head_k_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"k\"),\n"," \"layer batch pos head_index d_head -> layer pos head_index\",\n"," \"sum\",\n"," )\n"," head_v_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"v\"),\n"," \"layer batch pos head_index d_head -> layer pos head_index\",\n"," \"sum\",\n"," )\n"," head_pattern_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"pattern\"),\n"," \"layer batch head_index dest_pos src_pos -> layer dest_pos head_index\",\n"," \"sum\",\n"," )\n","\n"," return torch.stack(\n"," [\n"," head_out_by_pos_attr,\n"," head_q_by_pos_attr,\n"," head_k_by_pos_attr,\n"," head_v_by_pos_attr,\n"," head_pattern_by_pos_attr,\n"," ]\n"," )\n","\n","\n","every_head_by_pos_attr_patch_result = get_attr_patch_attn_head_by_pos_every(attr_cache)\n","every_head_by_pos_attr_patch_result = einops.rearrange(\n"," every_head_by_pos_attr_patch_result,\n"," \"act_type layer pos head -> act_type (layer head) pos\",\n",")\n","imshow(\n"," every_head_by_pos_attr_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Attribution Patching Per Head (By Pos)\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer & Head\",\n"," zmax=1,\n"," zmin=-1,\n"," x=[f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))],\n"," y=head_out_labels,\n",")"]},{"cell_type":"code","execution_count":38,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["scatter(\n"," y=every_head_by_pos_attr_patch_result.reshape(5, -1),\n"," x=every_head_by_pos_act_patch_result.reshape(5, -1),\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Attribution vs Activation Patching Per Head (by Pos)\",\n"," xaxis=\"Activation Patch\",\n"," yaxis=\"Attribution Patch\",\n"," include_diag=True,\n"," hover=[f\"{label} {tok}\" for label in head_out_labels for tok in graph_tok_labels],\n"," color=einops.repeat(\n"," torch.arange(model.cfg.n_layers),\n"," \"layer -> (layer head pos)\",\n"," head=model.cfg.n_heads,\n"," pos=15,\n"," ),\n"," color_continuous_scale=\"Portland\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" ## Factual Knowledge Patching Example\n"," Incomplete, but maybe of interest!\n"," Note that I have better results with the corrupted prompt as having random words rather than Colosseum."]},{"cell_type":"code","execution_count":39,"metadata":{},"outputs":[{"name":"stderr","output_type":"stream","text":["Using pad_token, but it is not set yet.\n"]},{"name":"stdout","output_type":"stream","text":["Loaded pretrained model gpt2-xl into HookedTransformer\n","Tokenized prompt: ['<|endoftext|>', 'The', ' E', 'iff', 'el', ' Tower', ' is', ' located', ' in', ' the', ' city', ' of']\n","Tokenized answer: [' Paris']\n"]},{"data":{"text/html":["
Performance on answer token:\n","Rank: 0        Logit: 20.73 Prob: 95.80% Token: | Paris|\n","
\n"],"text/plain":["Performance on answer token:\n","\u001b[1mRank: \u001b[0m\u001b[1;36m0\u001b[0m\u001b[1m Logit: \u001b[0m\u001b[1;36m20.73\u001b[0m\u001b[1m Prob: \u001b[0m\u001b[1;36m95.80\u001b[0m\u001b[1m% Token: | Paris|\u001b[0m\n"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Top 0th token. Logit: 20.73 Prob: 95.80% Token: | Paris|\n","Top 1th token. Logit: 16.49 Prob: 1.39% Token: | E|\n","Top 2th token. Logit: 14.69 Prob: 0.23% Token: | the|\n","Top 3th token. Logit: 14.58 Prob: 0.21% Token: | É|\n","Top 4th token. Logit: 14.44 Prob: 0.18% Token: | France|\n","Top 5th token. Logit: 14.36 Prob: 0.16% Token: | Mont|\n","Top 6th token. Logit: 13.77 Prob: 0.09% Token: | Le|\n","Top 7th token. Logit: 13.66 Prob: 0.08% Token: | Ang|\n","Top 8th token. Logit: 13.43 Prob: 0.06% Token: | V|\n","Top 9th token. Logit: 13.42 Prob: 0.06% Token: | Stras|\n"]},{"data":{"text/html":["
Ranks of the answer tokens: [(' Paris', 0)]\n","
\n"],"text/plain":["\u001b[1mRanks of the answer tokens:\u001b[0m \u001b[1m[\u001b[0m\u001b[1m(\u001b[0m\u001b[32m' Paris'\u001b[0m, \u001b[1;36m0\u001b[0m\u001b[1m)\u001b[0m\u001b[1m]\u001b[0m\n"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Tokenized prompt: ['<|endoftext|>', 'The', ' Col', 'os', 'se', 'um', ' is', ' located', ' in', ' the', ' city', ' of']\n","Tokenized answer: [' Rome']\n"]},{"data":{"text/html":["
Performance on answer token:\n","Rank: 0        Logit: 20.02 Prob: 83.70% Token: | Rome|\n","
\n"],"text/plain":["Performance on answer token:\n","\u001b[1mRank: \u001b[0m\u001b[1;36m0\u001b[0m\u001b[1m Logit: \u001b[0m\u001b[1;36m20.02\u001b[0m\u001b[1m Prob: \u001b[0m\u001b[1;36m83.70\u001b[0m\u001b[1m% Token: | Rome|\u001b[0m\n"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Top 0th token. Logit: 20.02 Prob: 83.70% Token: | Rome|\n","Top 1th token. Logit: 17.03 Prob: 4.23% Token: | Naples|\n","Top 2th token. Logit: 16.85 Prob: 3.51% Token: | Pompe|\n","Top 3th token. Logit: 16.14 Prob: 1.73% Token: | Ver|\n","Top 4th token. Logit: 15.87 Prob: 1.32% Token: | Florence|\n","Top 5th token. Logit: 14.77 Prob: 0.44% Token: | Roma|\n","Top 6th token. Logit: 14.68 Prob: 0.40% Token: | Milan|\n","Top 7th token. Logit: 14.66 Prob: 0.39% Token: | ancient|\n","Top 8th token. Logit: 14.37 Prob: 0.29% Token: | Pal|\n","Top 9th token. Logit: 14.30 Prob: 0.27% Token: | Constantinople|\n"]},{"data":{"text/html":["
Ranks of the answer tokens: [(' Rome', 0)]\n","
\n"],"text/plain":["\u001b[1mRanks of the answer tokens:\u001b[0m \u001b[1m[\u001b[0m\u001b[1m(\u001b[0m\u001b[32m' Rome'\u001b[0m, \u001b[1;36m0\u001b[0m\u001b[1m)\u001b[0m\u001b[1m]\u001b[0m\n"]},"metadata":{},"output_type":"display_data"}],"source":["gpt2_xl = HookedTransformer.from_pretrained(\"gpt2-xl\")\n","clean_prompt = \"The Eiffel Tower is located in the city of\"\n","clean_answer = \" Paris\"\n","# corrupted_prompt = \"The red brown fox jumps is located in the city of\"\n","corrupted_prompt = \"The Colosseum is located in the city of\"\n","corrupted_answer = \" Rome\"\n","utils.test_prompt(clean_prompt, clean_answer, gpt2_xl)\n","utils.test_prompt(corrupted_prompt, corrupted_answer, gpt2_xl)"]},{"cell_type":"code","execution_count":40,"metadata":{},"outputs":[],"source":["clean_answer_index = gpt2_xl.to_single_token(clean_answer)\n","corrupted_answer_index = gpt2_xl.to_single_token(corrupted_answer)\n","\n","\n","def factual_logit_diff(logits: TT[\"batch\", \"position\", \"d_vocab\"]):\n"," return logits[0, -1, clean_answer_index] - logits[0, -1, corrupted_answer_index]"]},{"cell_type":"code","execution_count":41,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean logit diff: 10.634519577026367\n","Corrupted logit diff: -8.988396644592285\n","Clean Metric: tensor(1., device='cuda:0', grad_fn=)\n","Corrupted Metric: tensor(0., device='cuda:0', grad_fn=)\n"]}],"source":["clean_logits, clean_cache = gpt2_xl.run_with_cache(clean_prompt)\n","CLEAN_LOGIT_DIFF_FACTUAL = factual_logit_diff(clean_logits).item()\n","corrupted_logits, _ = gpt2_xl.run_with_cache(corrupted_prompt)\n","CORRUPTED_LOGIT_DIFF_FACTUAL = factual_logit_diff(corrupted_logits).item()\n","\n","\n","def factual_metric(logits: TT[\"batch\", \"position\", \"d_vocab\"]):\n"," return (factual_logit_diff(logits) - CORRUPTED_LOGIT_DIFF_FACTUAL) / (\n"," CLEAN_LOGIT_DIFF_FACTUAL - CORRUPTED_LOGIT_DIFF_FACTUAL\n"," )\n","\n","\n","print(\"Clean logit diff:\", CLEAN_LOGIT_DIFF_FACTUAL)\n","print(\"Corrupted logit diff:\", CORRUPTED_LOGIT_DIFF_FACTUAL)\n","print(\"Clean Metric:\", factual_metric(clean_logits))\n","print(\"Corrupted Metric:\", factual_metric(corrupted_logits))"]},{"cell_type":"code","execution_count":42,"metadata":{},"outputs":[],"source":["# corrupted_value, corrupted_cache, corrupted_grad_cache = get_cache_fwd_and_bwd(gpt2_xl, corrupted_prompt, factual_metric)"]},{"cell_type":"code","execution_count":43,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean: ['<|endoftext|>', 'The', ' E', 'iff', 'el', ' Tower', ' is', ' located', ' in', ' the', ' city', ' of']\n","Corrupted: ['<|endoftext|>', 'The', ' Col', 'os', 'se', 'um', ' is', ' located', ' in', ' the', ' city', ' of']\n"]}],"source":["clean_tokens = gpt2_xl.to_tokens(clean_prompt)\n","clean_str_tokens = gpt2_xl.to_str_tokens(clean_prompt)\n","corrupted_tokens = gpt2_xl.to_tokens(corrupted_prompt)\n","corrupted_str_tokens = gpt2_xl.to_str_tokens(corrupted_prompt)\n","print(\"Clean:\", clean_str_tokens)\n","print(\"Corrupted:\", corrupted_str_tokens)"]},{"cell_type":"code","execution_count":44,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"b767eef7a3cd49b9b3cb6e5301463f08","version_major":2,"version_minor":0},"text/plain":[" 0%| | 0/48 [00:00\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def act_patch_residual(clean_cache, corrupted_tokens, model: HookedTransformer, metric):\n"," if len(corrupted_tokens.shape) == 2:\n"," corrupted_tokens = corrupted_tokens[0]\n"," residual_patches = torch.zeros(\n"," (model.cfg.n_layers, len(corrupted_tokens)), device=model.cfg.device\n"," )\n","\n"," def residual_hook(resid_pre, hook, layer, pos):\n"," resid_pre[:, pos, :] = clean_cache[\"resid_pre\", layer][:, pos, :]\n"," return resid_pre\n","\n"," for layer in tqdm.tqdm(range(model.cfg.n_layers)):\n"," for pos in range(len(corrupted_tokens)):\n"," patched_logits = model.run_with_hooks(\n"," corrupted_tokens,\n"," fwd_hooks=[\n"," (\n"," f\"blocks.{layer}.hook_resid_pre\",\n"," partial(residual_hook, layer=layer, pos=pos),\n"," )\n"," ],\n"," )\n"," residual_patches[layer, pos] = metric(patched_logits).item()\n"," return residual_patches\n","\n","\n","residual_act_patch = act_patch_residual(\n"," clean_cache, corrupted_tokens, gpt2_xl, factual_metric\n",")\n","\n","imshow(\n"," residual_act_patch,\n"," title=\"Factual Recall Patching (Residual)\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer\",\n"," x=clean_str_tokens,\n",")"]}],"metadata":{"kernelspec":{"display_name":"base","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.8"},"orig_nbformat":4,"vscode":{"interpreter":{"hash":"d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe"}}},"nbformat":4,"nbformat_minor":2} +{"cells":[{"cell_type":"markdown","metadata":{},"source":["\n"," \"Open\n",""]},{"cell_type":"markdown","metadata":{},"source":[" # Attribution Patching Demo\n"," **Read [the accompanying blog post here](https://neelnanda.io/attribution-patching) for more context**\n"," This is an interim research report, giving a whirlwind tour of some unpublished work I did at Anthropic (credit to the then team - Chris Olah, Catherine Olsson, Nelson Elhage and Tristan Hume for help, support, and mentorship!)\n","\n"," The goal of this work is run activation patching at an industrial scale, by using gradient based attribution to approximate the technique - allow an arbitrary number of patches to be made on two forwards and a single backward pass\n","\n"," I have had less time than hoped to flesh out this investigation, but am writing up a rough investigation and comparison to standard activation patching on a few tasks to give a sense of the potential of this approach, and where it works vs falls down."]},{"cell_type":"markdown","metadata":{},"source":[" To use this notebook, go to Runtime > Change Runtime Type and select GPU as the hardware accelerator.\n","\n"," **Tips for reading this Colab:**\n"," * You can run all this code for yourself!\n"," * The graphs are interactive!\n"," * Use the table of contents pane in the sidebar to navigate\n"," * Collapse irrelevant sections with the dropdown arrows\n"," * Search the page using the search in the sidebar, not CTRL+F"]},{"cell_type":"markdown","metadata":{},"source":[" ## Setup (Ignore)"]},{"cell_type":"code","execution_count":1,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Running as a Jupyter notebook - intended for development only!\n"]},{"name":"stderr","output_type":"stream","text":["/var/folders/m3/z6c6rcdj1rbb2jh9vqpgvxg40000gn/T/ipykernel_25358/2480103146.py:24: DeprecationWarning: 
`magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n"," ipython.magic(\"load_ext autoreload\")\n","/var/folders/m3/z6c6rcdj1rbb2jh9vqpgvxg40000gn/T/ipykernel_25358/2480103146.py:25: DeprecationWarning: `magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n"," ipython.magic(\"autoreload 2\")\n"]}],"source":["# Janky code to do different setup when run in a Colab notebook vs VSCode\n","import os\n","\n","DEBUG_MODE = False\n","IN_GITHUB = os.getenv(\"GITHUB_ACTIONS\") == \"true\"\n","try:\n"," import google.colab\n","\n"," IN_COLAB = True\n"," print(\"Running as a Colab notebook\")\n","except:\n"," IN_COLAB = False\n"," print(\"Running as a Jupyter notebook - intended for development only!\")\n"," from IPython import get_ipython\n","\n"," ipython = get_ipython()\n"," # Code to automatically update the HookedTransformer code as its edited without restarting the kernel\n"," ipython.magic(\"load_ext autoreload\")\n"," ipython.magic(\"autoreload 2\")\n","\n","if IN_COLAB or IN_GITHUB:\n"," %pip install transformer_lens\n"," %pip install torchtyping\n"," # Install my janky personal plotting utils\n"," %pip install git+https://github.com/neelnanda-io/neel-plotly.git\n"," # Install another version of node that makes PySvelte work way faster\n"," %pip install circuitsvis\n"," # Needed for PySvelte to work, v3 came out and broke things...\n"," %pip install typeguard==2.13.3"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["# Plotly needs a different renderer for VSCode/Notebooks vs Colab argh\n","import plotly.io as pio\n","\n","if IN_COLAB or not DEBUG_MODE:\n"," # Thanks to annoying rendering issues, Plotly graphics will either show up in colab OR Vscode depending on the renderer - this is bad for developing demos! 
Thus creating a debug mode.\n"," pio.renderers.default = \"colab\"\n","else:\n"," pio.renderers.default = \"notebook_connected\""]},{"cell_type":"code","execution_count":3,"metadata":{},"outputs":[{"ename":"ModuleNotFoundError","evalue":"No module named 'torchtyping'","output_type":"error","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)","Cell \u001b[0;32mIn[3], line 15\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mplotly\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mexpress\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mpx\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtorch\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mdata\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m DataLoader\n\u001b[0;32m---> 15\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtorchtyping\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m TensorType \u001b[38;5;28;01mas\u001b[39;00m TT\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtyping\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m List, Union, Optional, Callable\n\u001b[1;32m 17\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mfunctools\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m partial\n","\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'torchtyping'"]}],"source":["# Import stuff\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import torch.optim as optim\n","import numpy as np\n","import einops\n","from fancy_einsum import einsum\n","import tqdm.notebook as tqdm\n","import random\n","from pathlib import Path\n","import plotly.express as px\n","from torch.utils.data import DataLoader\n","\n","from torchtyping import TensorType as TT\n","from typing import List, Union, Optional, Callable\n","from functools import partial\n","import copy\n","import itertools\n","import json\n","\n","from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer\n","import dataclasses\n","import datasets\n","from IPython.display import HTML, Markdown"]},{"cell_type":"code","execution_count":5,"metadata":{},"outputs":[],"source":["import transformer_lens\n","import transformer_lens.utils as utils\n","from transformer_lens.hook_points import (\n"," HookedRootModule,\n"," HookPoint,\n",") # Hooking utilities\n","from transformer_lens import (\n"," HookedTransformer,\n"," HookedTransformerConfig,\n"," FactoredMatrix,\n"," ActivationCache,\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" Plotting helper functions from a janky personal library of plotting utils. 
The library is not documented and I recommend against trying to read it, just use your preferred plotting library if you want to do anything non-obvious:"]},{"cell_type":"code","execution_count":6,"metadata":{},"outputs":[],"source":["from neel_plotly import line, imshow, scatter"]},{"cell_type":"code","execution_count":7,"metadata":{},"outputs":[],"source":["import transformer_lens.patching as patching"]},{"cell_type":"markdown","metadata":{},"source":[" ## IOI Patching Setup\n"," This just copies the relevant set up from Exploratory Analysis Demo, and isn't very important."]},{"cell_type":"code","execution_count":8,"metadata":{},"outputs":[{"name":"stderr","output_type":"stream","text":["Using pad_token, but it is not set yet.\n"]},{"name":"stdout","output_type":"stream","text":["Loaded pretrained model gpt2-small into HookedTransformer\n"]}],"source":["model = HookedTransformer.from_pretrained(\"gpt2-small\")\n","model.set_use_attn_result(True)"]},{"cell_type":"code","execution_count":9,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean string 0 <|endoftext|>When John and Mary went to the shops, John gave the bag to\n","Corrupted string 0 <|endoftext|>When John and Mary went to the shops, Mary gave the bag to\n","Answer token indices tensor([[ 5335, 1757],\n"," [ 1757, 5335],\n"," [ 4186, 3700],\n"," [ 3700, 4186],\n"," [ 6035, 15686],\n"," [15686, 6035],\n"," [ 5780, 14235],\n"," [14235, 5780]], device='cuda:0')\n"]}],"source":["prompts = [\n"," \"When John and Mary went to the shops, John gave the bag to\",\n"," \"When John and Mary went to the shops, Mary gave the bag to\",\n"," \"When Tom and James went to the park, James gave the ball to\",\n"," \"When Tom and James went to the park, Tom gave the ball to\",\n"," \"When Dan and Sid went to the shops, Sid gave an apple to\",\n"," \"When Dan and Sid went to the shops, Dan gave an apple to\",\n"," \"After Martin and Amy went to the park, Amy gave a drink to\",\n"," \"After Martin and Amy went to the park, Martin gave a drink to\",\n","]\n","answers = [\n"," (\" Mary\", \" John\"),\n"," (\" John\", \" Mary\"),\n"," (\" Tom\", \" James\"),\n"," (\" James\", \" Tom\"),\n"," (\" Dan\", \" Sid\"),\n"," (\" Sid\", \" Dan\"),\n"," (\" Martin\", \" Amy\"),\n"," (\" Amy\", \" Martin\"),\n","]\n","\n","clean_tokens = model.to_tokens(prompts)\n","# Swap each adjacent pair, with a hacky list comprehension\n","corrupted_tokens = clean_tokens[\n"," [(i + 1 if i % 2 == 0 else i - 1) for i in range(len(clean_tokens))]\n","]\n","print(\"Clean string 0\", model.to_string(clean_tokens[0]))\n","print(\"Corrupted string 0\", model.to_string(corrupted_tokens[0]))\n","\n","answer_token_indices = torch.tensor(\n"," [\n"," [model.to_single_token(answers[i][j]) for j in range(2)]\n"," for i in range(len(answers))\n"," ],\n"," device=model.cfg.device,\n",")\n","print(\"Answer token indices\", answer_token_indices)"]},{"cell_type":"code","execution_count":10,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean logit diff: 3.5519\n","Corrupted logit diff: -3.5519\n"]}],"source":["def get_logit_diff(logits, answer_token_indices=answer_token_indices):\n"," if len(logits.shape) == 3:\n"," # Get final logits only\n"," logits = logits[:, -1, :]\n"," correct_logits = logits.gather(1, answer_token_indices[:, 0].unsqueeze(1))\n"," incorrect_logits = logits.gather(1, answer_token_indices[:, 1].unsqueeze(1))\n"," return (correct_logits - incorrect_logits).mean()\n","\n","\n","clean_logits, clean_cache = 
model.run_with_cache(clean_tokens)\n","corrupted_logits, corrupted_cache = model.run_with_cache(corrupted_tokens)\n","\n","clean_logit_diff = get_logit_diff(clean_logits, answer_token_indices).item()\n","print(f\"Clean logit diff: {clean_logit_diff:.4f}\")\n","\n","corrupted_logit_diff = get_logit_diff(corrupted_logits, answer_token_indices).item()\n","print(f\"Corrupted logit diff: {corrupted_logit_diff:.4f}\")"]},{"cell_type":"code","execution_count":11,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean Baseline is 1: 1.0000\n","Corrupted Baseline is 0: 0.0000\n"]}],"source":["CLEAN_BASELINE = clean_logit_diff\n","CORRUPTED_BASELINE = corrupted_logit_diff\n","\n","\n","def ioi_metric(logits, answer_token_indices=answer_token_indices):\n"," return (get_logit_diff(logits, answer_token_indices) - CORRUPTED_BASELINE) / (\n"," CLEAN_BASELINE - CORRUPTED_BASELINE\n"," )\n","\n","\n","print(f\"Clean Baseline is 1: {ioi_metric(clean_logits).item():.4f}\")\n","print(f\"Corrupted Baseline is 0: {ioi_metric(corrupted_logits).item():.4f}\")"]},{"cell_type":"markdown","metadata":{},"source":[" ## Patching\n"," In the following cells, we define attribution patching and use it in various ways on the model."]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["Metric = Callable[[TT[\"batch_and_pos_dims\", \"d_model\"]], float]"]},{"cell_type":"code","execution_count":13,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean Value: 1.0\n","Clean Activations Cached: 220\n","Clean Gradients Cached: 220\n","Corrupted Value: 0.0\n","Corrupted Activations Cached: 220\n","Corrupted Gradients Cached: 220\n"]}],"source":["filter_not_qkv_input = lambda name: \"_input\" not in name\n","\n","\n","def get_cache_fwd_and_bwd(model, tokens, metric):\n"," model.reset_hooks()\n"," cache = {}\n","\n"," def forward_cache_hook(act, hook):\n"," cache[hook.name] = act.detach()\n","\n"," model.add_hook(filter_not_qkv_input, forward_cache_hook, \"fwd\")\n","\n"," grad_cache = {}\n","\n"," def backward_cache_hook(act, hook):\n"," grad_cache[hook.name] = act.detach()\n","\n"," model.add_hook(filter_not_qkv_input, backward_cache_hook, \"bwd\")\n","\n"," value = metric(model(tokens))\n"," value.backward()\n"," model.reset_hooks()\n"," return (\n"," value.item(),\n"," ActivationCache(cache, model),\n"," ActivationCache(grad_cache, model),\n"," )\n","\n","\n","clean_value, clean_cache, clean_grad_cache = get_cache_fwd_and_bwd(\n"," model, clean_tokens, ioi_metric\n",")\n","print(\"Clean Value:\", clean_value)\n","print(\"Clean Activations Cached:\", len(clean_cache))\n","print(\"Clean Gradients Cached:\", len(clean_grad_cache))\n","corrupted_value, corrupted_cache, corrupted_grad_cache = get_cache_fwd_and_bwd(\n"," model, corrupted_tokens, ioi_metric\n",")\n","print(\"Corrupted Value:\", corrupted_value)\n","print(\"Corrupted Activations Cached:\", len(corrupted_cache))\n","print(\"Corrupted Gradients Cached:\", len(corrupted_grad_cache))"]},{"cell_type":"markdown","metadata":{},"source":[" ### Attention Attribution\n"," The easiest thing to start with is to not even engage with the corrupted tokens/patching, but to look at the attribution of the attention patterns - that is, the linear approximation to what happens if you set each element of the attention pattern to zero. This, as it turns out, is a good proxy to what is going on with each head!\n"," Note that this is *not* the same as what we will later do with patching. 
In particular, this does not set up a careful counterfactual! It's a good tool for what's generally going on in this problem, but does not control for eg stuff that systematically boosts John > Mary in general, stuff that says \"I should activate the IOI circuit\", etc. Though using logit diff as our metric *does*\n"," Each element of the batch is independent and the metric is an average logit diff, so we can analyse each batch element independently here. We'll look at the first one, and then at the average across the whole batch (note - 4 prompts have indirect object before subject, 4 prompts have it the other way round, making the average pattern harder to interpret - I plot it over the first sequence of tokens as a mildly misleading reference).\n"," We can compare it to the interpretability in the wild diagram, and basically instantly recover most of the circuit!"]},{"cell_type":"code","execution_count":14,"metadata":{},"outputs":[],"source":["def create_attention_attr(\n"," clean_cache, clean_grad_cache\n",") -> TT[\"batch\", \"layer\", \"head_index\", \"dest\", \"src\"]:\n"," attention_stack = torch.stack(\n"," [clean_cache[\"pattern\", l] for l in range(model.cfg.n_layers)], dim=0\n"," )\n"," attention_grad_stack = torch.stack(\n"," [clean_grad_cache[\"pattern\", l] for l in range(model.cfg.n_layers)], dim=0\n"," )\n"," attention_attr = attention_grad_stack * attention_stack\n"," attention_attr = einops.rearrange(\n"," attention_attr,\n"," \"layer batch head_index dest src -> batch layer head_index dest src\",\n"," )\n"," return attention_attr\n","\n","\n","attention_attr = create_attention_attr(clean_cache, clean_grad_cache)"]},{"cell_type":"code","execution_count":15,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["['L0H0', 'L0H1', 'L0H2', 'L0H3', 'L0H4']\n","['L0H0+', 'L0H0-', 'L0H1+', 'L0H1-', 'L0H2+']\n","['L0H0Q', 'L0H0K', 'L0H0V', 'L0H1Q', 'L0H1K']\n"]}],"source":["HEAD_NAMES = [\n"," f\"L{l}H{h}\" for l in range(model.cfg.n_layers) for h in range(model.cfg.n_heads)\n","]\n","HEAD_NAMES_SIGNED = [f\"{name}{sign}\" for name in HEAD_NAMES for sign in [\"+\", \"-\"]]\n","HEAD_NAMES_QKV = [\n"," f\"{name}{act_name}\" for name in HEAD_NAMES for act_name in [\"Q\", \"K\", \"V\"]\n","]\n","print(HEAD_NAMES[:5])\n","print(HEAD_NAMES_SIGNED[:5])\n","print(HEAD_NAMES_QKV[:5])"]},{"cell_type":"markdown","metadata":{},"source":[" An extremely janky way to plot the attention attribution patterns. We scale them to be in [-1, 1], split each head into a positive and negative part (so all of it is in [0, 1]), and then plot the top 20 head-halves (a head can appear twice!) by the max value of the attribution pattern."]},{"cell_type":"code","execution_count":16,"metadata":{},"outputs":[{"data":{"text/markdown":["### Attention Attribution for first sequence"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n"," \n","\n"," \n","
\n"," \n"," \n"," "],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/markdown":["### Summed Attention Attribution for all sequences"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n"," \n","\n"," \n","
\n"," \n"," \n"," "],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Note: Plotted over first sequence for reference, but pairs have IO and S1 in different positions.\n"]}],"source":["def plot_attention_attr(attention_attr, tokens, top_k=20, index=0, title=\"\"):\n"," if len(tokens.shape) == 2:\n"," tokens = tokens[index]\n"," if len(attention_attr.shape) == 5:\n"," attention_attr = attention_attr[index]\n"," attention_attr_pos = attention_attr.clamp(min=-1e-5)\n"," attention_attr_neg = -attention_attr.clamp(max=1e-5)\n"," attention_attr_signed = torch.stack([attention_attr_pos, attention_attr_neg], dim=0)\n"," attention_attr_signed = einops.rearrange(\n"," attention_attr_signed,\n"," \"sign layer head_index dest src -> (layer head_index sign) dest src\",\n"," )\n"," attention_attr_signed = attention_attr_signed / attention_attr_signed.max()\n"," attention_attr_indices = (\n"," attention_attr_signed.max(-1).values.max(-1).values.argsort(descending=True)\n"," )\n"," # print(attention_attr_indices.shape)\n"," # print(attention_attr_indices)\n"," attention_attr_signed = attention_attr_signed[attention_attr_indices, :, :]\n"," head_labels = [HEAD_NAMES_SIGNED[i.item()] for i in attention_attr_indices]\n","\n"," if title:\n"," display(Markdown(\"### \" + title))\n"," display(\n"," pysvelte.AttentionMulti(\n"," tokens=model.to_str_tokens(tokens),\n"," attention=attention_attr_signed.permute(1, 2, 0)[:, :, :top_k],\n"," head_labels=head_labels[:top_k],\n"," )\n"," )\n","\n","\n","plot_attention_attr(\n"," attention_attr,\n"," clean_tokens,\n"," index=0,\n"," title=\"Attention Attribution for first sequence\",\n",")\n","\n","plot_attention_attr(\n"," attention_attr.sum(0),\n"," clean_tokens[0],\n"," title=\"Summed Attention Attribution for all sequences\",\n",")\n","print(\n"," \"Note: Plotted over first sequence for reference, but pairs have IO and S1 in different positions.\"\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" ## Attribution Patching\n"," In the following sections, I will implement various kinds of attribution patching, and then compare them to the activation patching patterns (activation patching code copied from [Exploratory Analysis Demo](https://neelnanda.io/exploratory-analysis-demo))\n"," ### Residual Stream Patching\n","
Note: We add up across both d_model and batch (Explanation).\n"," We add up along d_model because we're taking the dot product - the derivative *is* the linear map that locally linearly approximates the metric, and so we take the dot product of our change vector with the derivative vector. Equivalently, we look at the effect of changing each coordinate independently, and then combine them by adding them up - it's linear, so this totally works.\n"," We add up across batch because we're taking the average of the metric, so each individual batch element provides `1/batch_size` of the overall effect. Because each batch element is independent of the others and no information moves between activations for different inputs, the batched version is equivalent to doing attribution patching separately for each input, and then averaging - in this second version the metric per input is *not* divided by batch_size because we don't average.
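For concreteness, here is a minimal standalone sketch of the reduction described above. It is not part of the original notebook: the tensors are random placeholders standing in for the cached activations and gradients, the shapes simply mirror gpt2-small on this prompt set, and the summation pattern is the one used by `attr_patch_residual` below.

```python
# Hedged sketch only: placeholder tensors stand in for cache["resid_pre", layer] etc.
import torch

batch, pos, d_model = 8, 15, 768  # hypothetical shapes matching gpt2-small on these prompts

corrupted_grad = torch.randn(batch, pos, d_model)  # stands in for corrupted_grad_cache["resid_pre", layer]
clean_act = torch.randn(batch, pos, d_model)       # stands in for clean_cache["resid_pre", layer]
corrupted_act = torch.randn(batch, pos, d_model)   # stands in for corrupted_cache["resid_pre", layer]

# First-order estimate of patching this activation at each position:
# dot product with the gradient (sum over d_model), then sum over batch,
# because the metric is a mean over the batch.
attribution_per_pos = (corrupted_grad * (clean_act - corrupted_act)).sum(dim=(0, 2))
print(attribution_per_pos.shape)  # torch.Size([15]): one attribution score per position
```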
"]},{"cell_type":"code","execution_count":17,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def attr_patch_residual(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"component\", \"pos\"]:\n"," clean_residual, residual_labels = clean_cache.accumulated_resid(\n"," -1, incl_mid=True, return_labels=True\n"," )\n"," corrupted_residual = corrupted_cache.accumulated_resid(\n"," -1, incl_mid=True, return_labels=False\n"," )\n"," corrupted_grad_residual = corrupted_grad_cache.accumulated_resid(\n"," -1, incl_mid=True, return_labels=False\n"," )\n"," residual_attr = einops.reduce(\n"," corrupted_grad_residual * (clean_residual - corrupted_residual),\n"," \"component batch pos d_model -> component pos\",\n"," \"sum\",\n"," )\n"," return residual_attr, residual_labels\n","\n","\n","residual_attr, residual_labels = attr_patch_residual(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","imshow(\n"," residual_attr,\n"," y=residual_labels,\n"," yaxis=\"Component\",\n"," xaxis=\"Position\",\n"," title=\"Residual Attribution Patching\",\n",")\n","\n","# ### Layer Output Patching"]},{"cell_type":"code","execution_count":18,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def attr_patch_layer_out(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"component\", \"pos\"]:\n"," clean_layer_out, labels = clean_cache.decompose_resid(-1, return_labels=True)\n"," corrupted_layer_out = corrupted_cache.decompose_resid(-1, return_labels=False)\n"," corrupted_grad_layer_out = corrupted_grad_cache.decompose_resid(\n"," -1, return_labels=False\n"," )\n"," layer_out_attr = einops.reduce(\n"," corrupted_grad_layer_out * (clean_layer_out - corrupted_layer_out),\n"," \"component batch pos d_model -> component pos\",\n"," \"sum\",\n"," )\n"," return layer_out_attr, labels\n","\n","\n","layer_out_attr, layer_out_labels = attr_patch_layer_out(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","imshow(\n"," layer_out_attr,\n"," y=layer_out_labels,\n"," yaxis=\"Component\",\n"," xaxis=\"Position\",\n"," title=\"Layer Output Attribution Patching\",\n",")"]},{"cell_type":"code","execution_count":19,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def attr_patch_head_out(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"component\", \"pos\"]:\n"," labels = HEAD_NAMES\n","\n"," clean_head_out = clean_cache.stack_head_results(-1, return_labels=False)\n"," corrupted_head_out = corrupted_cache.stack_head_results(-1, return_labels=False)\n"," corrupted_grad_head_out = corrupted_grad_cache.stack_head_results(\n"," -1, return_labels=False\n"," )\n"," head_out_attr = einops.reduce(\n"," corrupted_grad_head_out * (clean_head_out - corrupted_head_out),\n"," \"component batch pos d_model -> component pos\",\n"," \"sum\",\n"," )\n"," return head_out_attr, labels\n","\n","\n","head_out_attr, head_out_labels = attr_patch_head_out(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","imshow(\n"," head_out_attr,\n"," y=head_out_labels,\n"," yaxis=\"Component\",\n"," xaxis=\"Position\",\n"," title=\"Head Output Attribution Patching\",\n",")\n","sum_head_out_attr = einops.reduce(\n"," head_out_attr,\n"," \"(layer head) pos -> layer head\",\n"," \"sum\",\n"," layer=model.cfg.n_layers,\n"," head=model.cfg.n_heads,\n",")\n","imshow(\n"," sum_head_out_attr,\n"," yaxis=\"Layer\",\n"," xaxis=\"Head Index\",\n"," title=\"Head Output Attribution Patching Sum Over Pos\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" ### Head Activation Patching\n"," Intuitively, a head has three inputs, keys, queries and values. We can patch each of these individually to get a sense for where the important part of each head's input comes from!\n"," As a sanity check, we also do this for the mixed value. The result is a linear map of this (`z @ W_O == result`), so this is the same as patching the output of the head.\n"," We plot both the patch for each head over each position, and summed over position (it tends to be pretty sparse, so the latter is the same)"]},{"cell_type":"code","execution_count":20,"metadata":{},"outputs":[{"data":{"text/markdown":["#### Key Head Vector Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/markdown":["#### Query Head Vector Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/markdown":["#### Value Head Vector Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/markdown":["#### Mixed Value Head Vector Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["from typing_extensions import Literal\n","\n","\n","def stack_head_vector_from_cache(\n"," cache, activation_name: Literal[\"q\", \"k\", \"v\", \"z\"]\n",") -> TT[\"layer_and_head_index\", \"batch\", \"pos\", \"d_head\"]:\n"," \"\"\"Stacks the head vectors from the cache from a specific activation (key, query, value or mixed_value (z)) into a single tensor.\"\"\"\n"," stacked_head_vectors = torch.stack(\n"," [cache[activation_name, l] for l in range(model.cfg.n_layers)], dim=0\n"," )\n"," stacked_head_vectors = einops.rearrange(\n"," stacked_head_vectors,\n"," \"layer batch pos head_index d_head -> (layer head_index) batch pos d_head\",\n"," )\n"," return stacked_head_vectors\n","\n","\n","def attr_patch_head_vector(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n"," activation_name: Literal[\"q\", \"k\", \"v\", \"z\"],\n",") -> TT[\"component\", \"pos\"]:\n"," labels = HEAD_NAMES\n","\n"," clean_head_vector = stack_head_vector_from_cache(clean_cache, activation_name)\n"," corrupted_head_vector = stack_head_vector_from_cache(\n"," corrupted_cache, activation_name\n"," )\n"," corrupted_grad_head_vector = stack_head_vector_from_cache(\n"," corrupted_grad_cache, activation_name\n"," )\n"," head_vector_attr = einops.reduce(\n"," corrupted_grad_head_vector * (clean_head_vector - corrupted_head_vector),\n"," \"component batch pos d_head -> component pos\",\n"," \"sum\",\n"," )\n"," return head_vector_attr, labels\n","\n","\n","head_vector_attr_dict = {}\n","for activation_name, activation_name_full in [\n"," (\"k\", \"Key\"),\n"," (\"q\", \"Query\"),\n"," (\"v\", \"Value\"),\n"," (\"z\", \"Mixed Value\"),\n","]:\n"," display(Markdown(f\"#### {activation_name_full} Head Vector Attribution Patching\"))\n"," head_vector_attr_dict[activation_name], head_vector_labels = attr_patch_head_vector(\n"," clean_cache, corrupted_cache, corrupted_grad_cache, activation_name\n"," )\n"," imshow(\n"," head_vector_attr_dict[activation_name],\n"," y=head_vector_labels,\n"," yaxis=\"Component\",\n"," xaxis=\"Position\",\n"," title=f\"{activation_name_full} Attribution Patching\",\n"," )\n"," sum_head_vector_attr = einops.reduce(\n"," head_vector_attr_dict[activation_name],\n"," \"(layer head) pos -> layer head\",\n"," \"sum\",\n"," layer=model.cfg.n_layers,\n"," head=model.cfg.n_heads,\n"," )\n"," imshow(\n"," sum_head_vector_attr,\n"," yaxis=\"Layer\",\n"," xaxis=\"Head Index\",\n"," title=f\"{activation_name_full} Attribution Patching Sum Over Pos\",\n"," )"]},{"cell_type":"code","execution_count":21,"metadata":{},"outputs":[{"data":{"text/markdown":["### Head Pattern Attribution Patching"],"text/plain":[""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n"," \n","\n"," \n","
\n"," \n"," \n"," "],"text/plain":[""]},"metadata":{},"output_type":"display_data"}],"source":["from typing_extensions import Literal\n","\n","\n","def stack_head_pattern_from_cache(\n"," cache,\n",") -> TT[\"layer_and_head_index\", \"batch\", \"dest_pos\", \"src_pos\"]:\n"," \"\"\"Stacks the head patterns from the cache into a single tensor.\"\"\"\n"," stacked_head_pattern = torch.stack(\n"," [cache[\"pattern\", l] for l in range(model.cfg.n_layers)], dim=0\n"," )\n"," stacked_head_pattern = einops.rearrange(\n"," stacked_head_pattern,\n"," \"layer batch head_index dest_pos src_pos -> (layer head_index) batch dest_pos src_pos\",\n"," )\n"," return stacked_head_pattern\n","\n","\n","def attr_patch_head_pattern(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"component\", \"dest_pos\", \"src_pos\"]:\n"," labels = HEAD_NAMES\n","\n"," clean_head_pattern = stack_head_pattern_from_cache(clean_cache)\n"," corrupted_head_pattern = stack_head_pattern_from_cache(corrupted_cache)\n"," corrupted_grad_head_pattern = stack_head_pattern_from_cache(corrupted_grad_cache)\n"," head_pattern_attr = einops.reduce(\n"," corrupted_grad_head_pattern * (clean_head_pattern - corrupted_head_pattern),\n"," \"component batch dest_pos src_pos -> component dest_pos src_pos\",\n"," \"sum\",\n"," )\n"," return head_pattern_attr, labels\n","\n","\n","head_pattern_attr, labels = attr_patch_head_pattern(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","\n","plot_attention_attr(\n"," einops.rearrange(\n"," head_pattern_attr,\n"," \"(layer head) dest src -> layer head dest src\",\n"," layer=model.cfg.n_layers,\n"," head=model.cfg.n_heads,\n"," ),\n"," clean_tokens,\n"," index=0,\n"," title=\"Head Pattern Attribution Patching\",\n",")"]},{"cell_type":"code","execution_count":22,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def get_head_vector_grad_input_from_grad_cache(\n"," grad_cache: ActivationCache, activation_name: Literal[\"q\", \"k\", \"v\"], layer: int\n",") -> TT[\"batch\", \"pos\", \"head_index\", \"d_model\"]:\n"," vector_grad = grad_cache[activation_name, layer]\n"," ln_scales = grad_cache[\"scale\", layer, \"ln1\"]\n"," attn_layer_object = model.blocks[layer].attn\n"," if activation_name == \"q\":\n"," W = attn_layer_object.W_Q\n"," elif activation_name == \"k\":\n"," W = attn_layer_object.W_K\n"," elif activation_name == \"v\":\n"," W = attn_layer_object.W_V\n"," else:\n"," raise ValueError(\"Invalid activation name\")\n","\n"," return einsum(\n"," \"batch pos head_index d_head, batch pos, head_index d_model d_head -> batch pos head_index d_model\",\n"," vector_grad,\n"," ln_scales.squeeze(-1),\n"," W,\n"," )\n","\n","\n","def get_stacked_head_vector_grad_input(\n"," grad_cache, activation_name: Literal[\"q\", \"k\", \"v\"]\n",") -> TT[\"layer\", \"batch\", \"pos\", \"head_index\", \"d_model\"]:\n"," return torch.stack(\n"," [\n"," get_head_vector_grad_input_from_grad_cache(grad_cache, activation_name, l)\n"," for l in range(model.cfg.n_layers)\n"," ],\n"," dim=0,\n"," )\n","\n","\n","def get_full_vector_grad_input(\n"," grad_cache,\n",") -> TT[\"qkv\", \"layer\", \"batch\", \"pos\", \"head_index\", \"d_model\"]:\n"," return torch.stack(\n"," [\n"," get_stacked_head_vector_grad_input(grad_cache, activation_name)\n"," for activation_name in [\"q\", \"k\", \"v\"]\n"," ],\n"," dim=0,\n"," )\n","\n","\n","def attr_patch_head_path(\n"," clean_cache: ActivationCache,\n"," corrupted_cache: ActivationCache,\n"," corrupted_grad_cache: ActivationCache,\n",") -> TT[\"qkv\", \"dest_component\", \"src_component\", \"pos\"]:\n"," \"\"\"\n"," Computes the attribution patch along the path between each pair of heads.\n","\n"," Sets this to zero for the path from any late head to any early head\n","\n"," \"\"\"\n"," start_labels = HEAD_NAMES\n"," end_labels = HEAD_NAMES_QKV\n"," full_vector_grad_input = get_full_vector_grad_input(corrupted_grad_cache)\n"," clean_head_result_stack = clean_cache.stack_head_results(-1)\n"," corrupted_head_result_stack = corrupted_cache.stack_head_results(-1)\n"," diff_head_result = einops.rearrange(\n"," clean_head_result_stack - corrupted_head_result_stack,\n"," \"(layer head_index) batch pos d_model -> layer batch pos head_index d_model\",\n"," layer=model.cfg.n_layers,\n"," head_index=model.cfg.n_heads,\n"," )\n"," path_attr = einsum(\n"," \"qkv layer_end batch pos head_end d_model, layer_start batch pos head_start d_model -> qkv layer_end head_end layer_start head_start pos\",\n"," full_vector_grad_input,\n"," diff_head_result,\n"," )\n"," correct_layer_order_mask = (\n"," torch.arange(model.cfg.n_layers)[None, :, None, None, None, None]\n"," > torch.arange(model.cfg.n_layers)[None, None, None, :, None, None]\n"," ).to(path_attr.device)\n"," zero = torch.zeros(1, device=path_attr.device)\n"," path_attr = torch.where(correct_layer_order_mask, path_attr, zero)\n","\n"," path_attr = einops.rearrange(\n"," path_attr,\n"," \"qkv layer_end head_end layer_start head_start pos -> (layer_end head_end qkv) (layer_start head_start) pos\",\n"," )\n"," return path_attr, end_labels, start_labels\n","\n","\n","head_path_attr, end_labels, start_labels = attr_patch_head_path(\n"," clean_cache, corrupted_cache, corrupted_grad_cache\n",")\n","imshow(\n"," head_path_attr.sum(-1),\n"," y=end_labels,\n"," yaxis=\"Path End (Head 
Input)\",\n"," x=start_labels,\n"," xaxis=\"Path Start (Head Output)\",\n"," title=\"Head Path Attribution Patching\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" This is hard to parse. Here's an experiment with filtering for the most important heads and showing their paths."]},{"cell_type":"code","execution_count":23,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["head_out_values, head_out_indices = head_out_attr.sum(-1).abs().sort(descending=True)\n","line(head_out_values)\n","top_head_indices = head_out_indices[:22].sort().values\n","top_end_indices = []\n","top_end_labels = []\n","top_start_indices = []\n","top_start_labels = []\n","for i in top_head_indices:\n"," i = i.item()\n"," top_start_indices.append(i)\n"," top_start_labels.append(start_labels[i])\n"," for j in range(3):\n"," top_end_indices.append(3 * i + j)\n"," top_end_labels.append(end_labels[3 * i + j])\n","\n","imshow(\n"," head_path_attr[top_end_indices, :][:, top_start_indices].sum(-1),\n"," y=top_end_labels,\n"," yaxis=\"Path End (Head Input)\",\n"," x=top_start_labels,\n"," xaxis=\"Path Start (Head Output)\",\n"," title=\"Head Path Attribution Patching (Filtered for Top Heads)\",\n",")"]},{"cell_type":"code","execution_count":24,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["for j, composition_type in enumerate([\"Query\", \"Key\", \"Value\"]):\n"," imshow(\n"," head_path_attr[top_end_indices, :][:, top_start_indices][j::3].sum(-1),\n"," y=top_end_labels[j::3],\n"," yaxis=\"Path End (Head Input)\",\n"," x=top_start_labels,\n"," xaxis=\"Path Start (Head Output)\",\n"," title=f\"Head Path to {composition_type} Attribution Patching (Filtered for Top Heads)\",\n"," )"]},{"cell_type":"code","execution_count":25,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["top_head_path_attr = einops.rearrange(\n"," head_path_attr[top_end_indices, :][:, top_start_indices].sum(-1),\n"," \"(head_end qkv) head_start -> qkv head_end head_start\",\n"," qkv=3,\n",")\n","imshow(\n"," top_head_path_attr,\n"," y=[i[:-1] for i in top_end_labels[::3]],\n"," yaxis=\"Path End (Head Input)\",\n"," x=top_start_labels,\n"," xaxis=\"Path Start (Head Output)\",\n"," title=f\"Head Path Attribution Patching (Filtered for Top Heads)\",\n"," facet_col=0,\n"," facet_labels=[\"Query\", \"Key\", \"Value\"],\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" Let's now dive into 3 interesting heads: L5H5 (induction head), L8H6 (S-Inhibition Head), L9H9 (Name Mover) and look at their input and output paths (note - Q input means )"]},{"cell_type":"code","execution_count":26,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["interesting_heads = [\n"," 5 * model.cfg.n_heads + 5,\n"," 8 * model.cfg.n_heads + 6,\n"," 9 * model.cfg.n_heads + 9,\n","]\n","interesting_head_labels = [HEAD_NAMES[i] for i in interesting_heads]\n","for head_index, label in zip(interesting_heads, interesting_head_labels):\n"," in_paths = head_path_attr[3 * head_index : 3 * head_index + 3].sum(-1)\n"," out_paths = head_path_attr[:, head_index].sum(-1)\n"," out_paths = einops.rearrange(out_paths, \"(layer_head qkv) -> qkv layer_head\", qkv=3)\n"," all_paths = torch.cat([in_paths, out_paths], dim=0)\n"," all_paths = einops.rearrange(\n"," all_paths,\n"," \"path_type (layer head) -> path_type layer head\",\n"," layer=model.cfg.n_layers,\n"," head=model.cfg.n_heads,\n"," )\n"," imshow(\n"," all_paths,\n"," facet_col=0,\n"," facet_labels=[\n"," \"Query (In)\",\n"," \"Key (In)\",\n"," \"Value (In)\",\n"," \"Query (Out)\",\n"," \"Key (Out)\",\n"," \"Value (Out)\",\n"," ],\n"," title=f\"Input and Output Paths for head {label}\",\n"," yaxis=\"Layer\",\n"," xaxis=\"Head\",\n"," )"]},{"cell_type":"markdown","metadata":{},"source":[" ## Validating Attribution vs Activation Patching\n"," Let's now compare attribution and activation patching. Generally it's a decent approximation! The main place it fails is MLP0 and the residual stream\n"," My fuzzy intuition is that attribution patching works badly for \"big\" things which are poorly modelled as linear approximations, and works well for \"small\" things which are more like incremental changes. Anything involving replacing the embedding is a \"big\" thing, which includes residual streams, and in GPT-2 small MLP0 seems to be used as an \"extended embedding\" (where later layers use MLP0's output instead of the token embedding), so I also count it as big.\n"," See more discussion in the accompanying blog post!\n"]},{"cell_type":"markdown","metadata":{},"source":[" First do some refactoring to make attribution patching more generic. We make an attribution cache, which is an ActivationCache where each element is (clean_act - corrupted_act) * corrupted_grad, so that it's the per-element attribution for each activation. Thanks to linearity, we just compute things by adding stuff up along the relevant dimensions!"]},{"cell_type":"code","execution_count":27,"metadata":{},"outputs":[],"source":["attribution_cache_dict = {}\n","for key in corrupted_grad_cache.cache_dict.keys():\n"," attribution_cache_dict[key] = corrupted_grad_cache.cache_dict[key] * (\n"," clean_cache.cache_dict[key] - corrupted_cache.cache_dict[key]\n"," )\n","attr_cache = ActivationCache(attribution_cache_dict, model)"]},{"cell_type":"markdown","metadata":{},"source":[" By block: For each head we patch the starting residual stream, attention output + MLP output"]},{"cell_type":"code","execution_count":28,"metadata":{},"outputs":[],"source":["str_tokens = model.to_str_tokens(clean_tokens[0])\n","context_length = len(str_tokens)"]},{"cell_type":"code","execution_count":29,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"95a5290e11b64b6a95ef5dd37d027c7a","version_major":2,"version_minor":0},"text/plain":[" 0%| | 0/180 [00:00\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["every_block_act_patch_result = patching.get_act_patch_block_every(\n"," model, corrupted_tokens, clean_cache, ioi_metric\n",")\n","imshow(\n"," every_block_act_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Residual Stream\", \"Attn Output\", \"MLP Output\"],\n"," title=\"Activation Patching Per Block\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer\",\n"," zmax=1,\n"," zmin=-1,\n"," x=[f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))],\n",")"]},{"cell_type":"code","execution_count":30,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def get_attr_patch_block_every(attr_cache):\n"," resid_pre_attr = einops.reduce(\n"," attr_cache.stack_activation(\"resid_pre\"),\n"," \"layer batch pos d_model -> layer pos\",\n"," \"sum\",\n"," )\n"," attn_out_attr = einops.reduce(\n"," attr_cache.stack_activation(\"attn_out\"),\n"," \"layer batch pos d_model -> layer pos\",\n"," \"sum\",\n"," )\n"," mlp_out_attr = einops.reduce(\n"," attr_cache.stack_activation(\"mlp_out\"),\n"," \"layer batch pos d_model -> layer pos\",\n"," \"sum\",\n"," )\n","\n"," every_block_attr_patch_result = torch.stack(\n"," [resid_pre_attr, attn_out_attr, mlp_out_attr], dim=0\n"," )\n"," return every_block_attr_patch_result\n","\n","\n","every_block_attr_patch_result = get_attr_patch_block_every(attr_cache)\n","imshow(\n"," every_block_attr_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Residual Stream\", \"Attn Output\", \"MLP Output\"],\n"," title=\"Attribution Patching Per Block\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer\",\n"," zmax=1,\n"," zmin=-1,\n"," x=[f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))],\n",")"]},{"cell_type":"code","execution_count":31,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["scatter(\n"," y=every_block_attr_patch_result.reshape(3, -1),\n"," x=every_block_act_patch_result.reshape(3, -1),\n"," facet_col=0,\n"," facet_labels=[\"Residual Stream\", \"Attn Output\", \"MLP Output\"],\n"," title=\"Attribution vs Activation Patching Per Block\",\n"," xaxis=\"Activation Patch\",\n"," yaxis=\"Attribution Patch\",\n"," hover=[\n"," f\"Layer {l}, Position {p}, |{str_tokens[p]}|\"\n"," for l in range(model.cfg.n_layers)\n"," for p in range(context_length)\n"," ],\n"," color=einops.repeat(\n"," torch.arange(model.cfg.n_layers), \"layer -> (layer pos)\", pos=context_length\n"," ),\n"," color_continuous_scale=\"Portland\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" By head: For each head we patch the output, query, key, value or pattern. We do all positions at once so it's not super slow."]},{"cell_type":"code","execution_count":32,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"18b2e6b0985b40cd8c0cd1a16ba62975","version_major":2,"version_minor":0},"text/plain":[" 0%| | 0/144 [00:00\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["every_head_all_pos_act_patch_result = patching.get_act_patch_attn_head_all_pos_every(\n"," model, corrupted_tokens, clean_cache, ioi_metric\n",")\n","imshow(\n"," every_head_all_pos_act_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Activation Patching Per Head (All Pos)\",\n"," xaxis=\"Head\",\n"," yaxis=\"Layer\",\n"," zmax=1,\n"," zmin=-1,\n",")"]},{"cell_type":"code","execution_count":33,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def get_attr_patch_attn_head_all_pos_every(attr_cache):\n"," head_out_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"z\"),\n"," \"layer batch pos head_index d_head -> layer head_index\",\n"," \"sum\",\n"," )\n"," head_q_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"q\"),\n"," \"layer batch pos head_index d_head -> layer head_index\",\n"," \"sum\",\n"," )\n"," head_k_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"k\"),\n"," \"layer batch pos head_index d_head -> layer head_index\",\n"," \"sum\",\n"," )\n"," head_v_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"v\"),\n"," \"layer batch pos head_index d_head -> layer head_index\",\n"," \"sum\",\n"," )\n"," head_pattern_all_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"pattern\"),\n"," \"layer batch head_index dest_pos src_pos -> layer head_index\",\n"," \"sum\",\n"," )\n","\n"," return torch.stack(\n"," [\n"," head_out_all_pos_attr,\n"," head_q_all_pos_attr,\n"," head_k_all_pos_attr,\n"," head_v_all_pos_attr,\n"," head_pattern_all_pos_attr,\n"," ]\n"," )\n","\n","\n","every_head_all_pos_attr_patch_result = get_attr_patch_attn_head_all_pos_every(\n"," attr_cache\n",")\n","imshow(\n"," every_head_all_pos_attr_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Attribution Patching Per Head (All Pos)\",\n"," xaxis=\"Head\",\n"," yaxis=\"Layer\",\n"," zmax=1,\n"," zmin=-1,\n",")"]},{"cell_type":"code","execution_count":34,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["scatter(\n"," y=every_head_all_pos_attr_patch_result.reshape(5, -1),\n"," x=every_head_all_pos_act_patch_result.reshape(5, -1),\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Attribution vs Activation Patching Per Head (All Pos)\",\n"," xaxis=\"Activation Patch\",\n"," yaxis=\"Attribution Patch\",\n"," include_diag=True,\n"," hover=head_out_labels,\n"," color=einops.repeat(\n"," torch.arange(model.cfg.n_layers),\n"," \"layer -> (layer head)\",\n"," head=model.cfg.n_heads,\n"," ),\n"," color_continuous_scale=\"Portland\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" We see pretty good results in general, but significant errors for heads L5H5 on query and moderate errors for head L10H7 on query and key, and moderate errors for head L11H10 on key. But each of these is fine for pattern and output. My guess is that the problem is that these have pretty saturated attention on a single token, and the linear approximation is thus not great on the attention calculation here, but I'm not sure. When we plot the attention patterns, we do see this!\n"," Note that the axis labels are for the *first* prompt's tokens, but each facet is a different prompt, so this is somewhat inaccurate. In particular, every odd facet has indirect object and subject in the opposite order (IO first). But otherwise everything lines up between the prompts"]},{"cell_type":"code","execution_count":35,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
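To make the saturation point above concrete, here is a small illustrative sketch that is not in the original notebook (the numbers are invented): for a softmax, the gradient of an output probability with respect to its own score is p * (1 - p), which vanishes as p approaches 1, so a first-order attribution estimate of a large change to that score badly underestimates the true effect.

```python
# Illustrative sketch with made-up numbers, not taken from the notebook.
import torch

logits = torch.tensor([8.0, 0.0, 0.0], requires_grad=True)  # hypothetical near-saturated attention scores
probs = torch.softmax(logits, dim=-1)
probs[0].backward()  # populate logits.grad with d probs[0] / d logits

p = probs[0].item()
print(f"p = {p:.4f}, dp/dscore = {logits.grad[0].item():.4f} (= p*(1-p) = {p * (1 - p):.4f})")

# Linear estimate of the change in p from shifting the top score down by 8
# (removing the saturation) vs. the true change from re-running the softmax:
true_change = torch.softmax(torch.tensor([0.0, 0.0, 0.0]), dim=-1)[0] - probs.detach()[0]
linear_estimate = logits.grad[0] * (-8.0)
print(f"true change = {true_change.item():.4f}, linear estimate = {linear_estimate.item():.4f}")
```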
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"},{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["graph_tok_labels = [\n"," f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))\n","]\n","imshow(\n"," clean_cache[\"pattern\", 5][:, 5],\n"," x=graph_tok_labels,\n"," y=graph_tok_labels,\n"," facet_col=0,\n"," title=\"Attention for Head L5H5\",\n"," facet_name=\"Prompt\",\n",")\n","imshow(\n"," clean_cache[\"pattern\", 10][:, 7],\n"," x=graph_tok_labels,\n"," y=graph_tok_labels,\n"," facet_col=0,\n"," title=\"Attention for Head L10H7\",\n"," facet_name=\"Prompt\",\n",")\n","imshow(\n"," clean_cache[\"pattern\", 11][:, 10],\n"," x=graph_tok_labels,\n"," y=graph_tok_labels,\n"," facet_col=0,\n"," title=\"Attention for Head L11H10\",\n"," facet_name=\"Prompt\",\n",")\n","\n","\n","# [markdown]"]},{"cell_type":"code","execution_count":36,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"06f39489001845849fbc7446a07066f4","version_major":2,"version_minor":0},"text/plain":[" 0%| | 0/2160 [00:00\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["every_head_by_pos_act_patch_result = patching.get_act_patch_attn_head_by_pos_every(\n"," model, corrupted_tokens, clean_cache, ioi_metric\n",")\n","every_head_by_pos_act_patch_result = einops.rearrange(\n"," every_head_by_pos_act_patch_result,\n"," \"act_type layer pos head -> act_type (layer head) pos\",\n",")\n","imshow(\n"," every_head_by_pos_act_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Activation Patching Per Head (By Pos)\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer & Head\",\n"," zmax=1,\n"," zmin=-1,\n"," x=[f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))],\n"," y=head_out_labels,\n",")"]},{"cell_type":"code","execution_count":37,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def get_attr_patch_attn_head_by_pos_every(attr_cache):\n"," head_out_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"z\"),\n"," \"layer batch pos head_index d_head -> layer pos head_index\",\n"," \"sum\",\n"," )\n"," head_q_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"q\"),\n"," \"layer batch pos head_index d_head -> layer pos head_index\",\n"," \"sum\",\n"," )\n"," head_k_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"k\"),\n"," \"layer batch pos head_index d_head -> layer pos head_index\",\n"," \"sum\",\n"," )\n"," head_v_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"v\"),\n"," \"layer batch pos head_index d_head -> layer pos head_index\",\n"," \"sum\",\n"," )\n"," head_pattern_by_pos_attr = einops.reduce(\n"," attr_cache.stack_activation(\"pattern\"),\n"," \"layer batch head_index dest_pos src_pos -> layer dest_pos head_index\",\n"," \"sum\",\n"," )\n","\n"," return torch.stack(\n"," [\n"," head_out_by_pos_attr,\n"," head_q_by_pos_attr,\n"," head_k_by_pos_attr,\n"," head_v_by_pos_attr,\n"," head_pattern_by_pos_attr,\n"," ]\n"," )\n","\n","\n","every_head_by_pos_attr_patch_result = get_attr_patch_attn_head_by_pos_every(attr_cache)\n","every_head_by_pos_attr_patch_result = einops.rearrange(\n"," every_head_by_pos_attr_patch_result,\n"," \"act_type layer pos head -> act_type (layer head) pos\",\n",")\n","imshow(\n"," every_head_by_pos_attr_patch_result,\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Attribution Patching Per Head (By Pos)\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer & Head\",\n"," zmax=1,\n"," zmin=-1,\n"," x=[f\"{tok}_{i}\" for i, tok in enumerate(model.to_str_tokens(clean_tokens[0]))],\n"," y=head_out_labels,\n",")"]},{"cell_type":"code","execution_count":38,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["scatter(\n"," y=every_head_by_pos_attr_patch_result.reshape(5, -1),\n"," x=every_head_by_pos_act_patch_result.reshape(5, -1),\n"," facet_col=0,\n"," facet_labels=[\"Output\", \"Query\", \"Key\", \"Value\", \"Pattern\"],\n"," title=\"Attribution vs Activation Patching Per Head (by Pos)\",\n"," xaxis=\"Activation Patch\",\n"," yaxis=\"Attribution Patch\",\n"," include_diag=True,\n"," hover=[f\"{label} {tok}\" for label in head_out_labels for tok in graph_tok_labels],\n"," color=einops.repeat(\n"," torch.arange(model.cfg.n_layers),\n"," \"layer -> (layer head pos)\",\n"," head=model.cfg.n_heads,\n"," pos=15,\n"," ),\n"," color_continuous_scale=\"Portland\",\n",")"]},{"cell_type":"markdown","metadata":{},"source":[" ## Factual Knowledge Patching Example\n"," Incomplete, but maybe of interest!\n"," Note that I have better results with the corrupted prompt as having random words rather than Colosseum."]},{"cell_type":"code","execution_count":39,"metadata":{},"outputs":[{"name":"stderr","output_type":"stream","text":["Using pad_token, but it is not set yet.\n"]},{"name":"stdout","output_type":"stream","text":["Loaded pretrained model gpt2-xl into HookedTransformer\n","Tokenized prompt: ['<|endoftext|>', 'The', ' E', 'iff', 'el', ' Tower', ' is', ' located', ' in', ' the', ' city', ' of']\n","Tokenized answer: [' Paris']\n"]},{"data":{"text/html":["
Performance on answer token:\n","Rank: 0        Logit: 20.73 Prob: 95.80% Token: | Paris|\n","
\n"],"text/plain":["Performance on answer token:\n","\u001b[1mRank: \u001b[0m\u001b[1;36m0\u001b[0m\u001b[1m Logit: \u001b[0m\u001b[1;36m20.73\u001b[0m\u001b[1m Prob: \u001b[0m\u001b[1;36m95.80\u001b[0m\u001b[1m% Token: | Paris|\u001b[0m\n"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Top 0th token. Logit: 20.73 Prob: 95.80% Token: | Paris|\n","Top 1th token. Logit: 16.49 Prob: 1.39% Token: | E|\n","Top 2th token. Logit: 14.69 Prob: 0.23% Token: | the|\n","Top 3th token. Logit: 14.58 Prob: 0.21% Token: | É|\n","Top 4th token. Logit: 14.44 Prob: 0.18% Token: | France|\n","Top 5th token. Logit: 14.36 Prob: 0.16% Token: | Mont|\n","Top 6th token. Logit: 13.77 Prob: 0.09% Token: | Le|\n","Top 7th token. Logit: 13.66 Prob: 0.08% Token: | Ang|\n","Top 8th token. Logit: 13.43 Prob: 0.06% Token: | V|\n","Top 9th token. Logit: 13.42 Prob: 0.06% Token: | Stras|\n"]},{"data":{"text/html":["
Ranks of the answer tokens: [(' Paris', 0)]\n","
\n"],"text/plain":["\u001b[1mRanks of the answer tokens:\u001b[0m \u001b[1m[\u001b[0m\u001b[1m(\u001b[0m\u001b[32m' Paris'\u001b[0m, \u001b[1;36m0\u001b[0m\u001b[1m)\u001b[0m\u001b[1m]\u001b[0m\n"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Tokenized prompt: ['<|endoftext|>', 'The', ' Col', 'os', 'se', 'um', ' is', ' located', ' in', ' the', ' city', ' of']\n","Tokenized answer: [' Rome']\n"]},{"data":{"text/html":["
Performance on answer token:\n","Rank: 0        Logit: 20.02 Prob: 83.70% Token: | Rome|\n","
\n"],"text/plain":["Performance on answer token:\n","\u001b[1mRank: \u001b[0m\u001b[1;36m0\u001b[0m\u001b[1m Logit: \u001b[0m\u001b[1;36m20.02\u001b[0m\u001b[1m Prob: \u001b[0m\u001b[1;36m83.70\u001b[0m\u001b[1m% Token: | Rome|\u001b[0m\n"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Top 0th token. Logit: 20.02 Prob: 83.70% Token: | Rome|\n","Top 1th token. Logit: 17.03 Prob: 4.23% Token: | Naples|\n","Top 2th token. Logit: 16.85 Prob: 3.51% Token: | Pompe|\n","Top 3th token. Logit: 16.14 Prob: 1.73% Token: | Ver|\n","Top 4th token. Logit: 15.87 Prob: 1.32% Token: | Florence|\n","Top 5th token. Logit: 14.77 Prob: 0.44% Token: | Roma|\n","Top 6th token. Logit: 14.68 Prob: 0.40% Token: | Milan|\n","Top 7th token. Logit: 14.66 Prob: 0.39% Token: | ancient|\n","Top 8th token. Logit: 14.37 Prob: 0.29% Token: | Pal|\n","Top 9th token. Logit: 14.30 Prob: 0.27% Token: | Constantinople|\n"]},{"data":{"text/html":["
Ranks of the answer tokens: [(' Rome', 0)]\n","
\n"],"text/plain":["\u001b[1mRanks of the answer tokens:\u001b[0m \u001b[1m[\u001b[0m\u001b[1m(\u001b[0m\u001b[32m' Rome'\u001b[0m, \u001b[1;36m0\u001b[0m\u001b[1m)\u001b[0m\u001b[1m]\u001b[0m\n"]},"metadata":{},"output_type":"display_data"}],"source":["gpt2_xl = HookedTransformer.from_pretrained(\"gpt2-xl\")\n","clean_prompt = \"The Eiffel Tower is located in the city of\"\n","clean_answer = \" Paris\"\n","# corrupted_prompt = \"The red brown fox jumps is located in the city of\"\n","corrupted_prompt = \"The Colosseum is located in the city of\"\n","corrupted_answer = \" Rome\"\n","utils.test_prompt(clean_prompt, clean_answer, gpt2_xl)\n","utils.test_prompt(corrupted_prompt, corrupted_answer, gpt2_xl)"]},{"cell_type":"code","execution_count":40,"metadata":{},"outputs":[],"source":["clean_answer_index = gpt2_xl.to_single_token(clean_answer)\n","corrupted_answer_index = gpt2_xl.to_single_token(corrupted_answer)\n","\n","\n","def factual_logit_diff(logits: TT[\"batch\", \"position\", \"d_vocab\"]):\n"," return logits[0, -1, clean_answer_index] - logits[0, -1, corrupted_answer_index]"]},{"cell_type":"code","execution_count":41,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean logit diff: 10.634519577026367\n","Corrupted logit diff: -8.988396644592285\n","Clean Metric: tensor(1., device='cuda:0', grad_fn=)\n","Corrupted Metric: tensor(0., device='cuda:0', grad_fn=)\n"]}],"source":["clean_logits, clean_cache = gpt2_xl.run_with_cache(clean_prompt)\n","CLEAN_LOGIT_DIFF_FACTUAL = factual_logit_diff(clean_logits).item()\n","corrupted_logits, _ = gpt2_xl.run_with_cache(corrupted_prompt)\n","CORRUPTED_LOGIT_DIFF_FACTUAL = factual_logit_diff(corrupted_logits).item()\n","\n","\n","def factual_metric(logits: TT[\"batch\", \"position\", \"d_vocab\"]):\n"," return (factual_logit_diff(logits) - CORRUPTED_LOGIT_DIFF_FACTUAL) / (\n"," CLEAN_LOGIT_DIFF_FACTUAL - CORRUPTED_LOGIT_DIFF_FACTUAL\n"," )\n","\n","\n","print(\"Clean logit diff:\", CLEAN_LOGIT_DIFF_FACTUAL)\n","print(\"Corrupted logit diff:\", CORRUPTED_LOGIT_DIFF_FACTUAL)\n","print(\"Clean Metric:\", factual_metric(clean_logits))\n","print(\"Corrupted Metric:\", factual_metric(corrupted_logits))"]},{"cell_type":"code","execution_count":42,"metadata":{},"outputs":[],"source":["# corrupted_value, corrupted_cache, corrupted_grad_cache = get_cache_fwd_and_bwd(gpt2_xl, corrupted_prompt, factual_metric)"]},{"cell_type":"code","execution_count":43,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["Clean: ['<|endoftext|>', 'The', ' E', 'iff', 'el', ' Tower', ' is', ' located', ' in', ' the', ' city', ' of']\n","Corrupted: ['<|endoftext|>', 'The', ' Col', 'os', 'se', 'um', ' is', ' located', ' in', ' the', ' city', ' of']\n"]}],"source":["clean_tokens = gpt2_xl.to_tokens(clean_prompt)\n","clean_str_tokens = gpt2_xl.to_str_tokens(clean_prompt)\n","corrupted_tokens = gpt2_xl.to_tokens(corrupted_prompt)\n","corrupted_str_tokens = gpt2_xl.to_str_tokens(corrupted_prompt)\n","print(\"Clean:\", clean_str_tokens)\n","print(\"Corrupted:\", corrupted_str_tokens)"]},{"cell_type":"code","execution_count":44,"metadata":{},"outputs":[{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"b767eef7a3cd49b9b3cb6e5301463f08","version_major":2,"version_minor":0},"text/plain":[" 0%| | 0/48 [00:00\n","\n","\n","
\n","
\n","\n",""]},"metadata":{},"output_type":"display_data"}],"source":["def act_patch_residual(clean_cache, corrupted_tokens, model: HookedTransformer, metric):\n"," if len(corrupted_tokens.shape) == 2:\n"," corrupted_tokens = corrupted_tokens[0]\n"," residual_patches = torch.zeros(\n"," (model.cfg.n_layers, len(corrupted_tokens)), device=model.cfg.device\n"," )\n","\n"," def residual_hook(resid_pre, hook, layer, pos):\n"," resid_pre[:, pos, :] = clean_cache[\"resid_pre\", layer][:, pos, :]\n"," return resid_pre\n","\n"," for layer in tqdm.tqdm(range(model.cfg.n_layers)):\n"," for pos in range(len(corrupted_tokens)):\n"," patched_logits = model.run_with_hooks(\n"," corrupted_tokens,\n"," fwd_hooks=[\n"," (\n"," f\"blocks.{layer}.hook_resid_pre\",\n"," partial(residual_hook, layer=layer, pos=pos),\n"," )\n"," ],\n"," )\n"," residual_patches[layer, pos] = metric(patched_logits).item()\n"," return residual_patches\n","\n","\n","residual_act_patch = act_patch_residual(\n"," clean_cache, corrupted_tokens, gpt2_xl, factual_metric\n",")\n","\n","imshow(\n"," residual_act_patch,\n"," title=\"Factual Recall Patching (Residual)\",\n"," xaxis=\"Position\",\n"," yaxis=\"Layer\",\n"," x=clean_str_tokens,\n",")"]}],"metadata":{"kernelspec":{"display_name":"base","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.8"},"orig_nbformat":4,"vscode":{"interpreter":{"hash":"d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe"}}},"nbformat":4,"nbformat_minor":2} diff --git a/demos/BERT.ipynb b/demos/BERT.ipynb index 5c2c96c..d086fed 100644 --- a/demos/BERT.ipynb +++ b/demos/BERT.ipynb @@ -72,7 +72,7 @@ " # PySvelte is an unmaintained visualization library, use it as a backup if circuitsvis isn't working\n", " # # Install another version of node that makes PySvelte work way faster\n", " # !curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -; sudo apt-get install -y nodejs\n", - " # %pip install git+https://github.com/TransformerLensOrg/PySvelte.git\n", + " # %pip install git+https://github.com/neelnanda-io/PySvelte.git\n", "except:\n", " IN_COLAB = False\n", "\n", diff --git a/demos/Grokking_Demo.ipynb b/demos/Grokking_Demo.ipynb index b0caedf..2604967 100644 --- a/demos/Grokking_Demo.ipynb +++ b/demos/Grokking_Demo.ipynb @@ -65,7 +65,7 @@ " # PySvelte is an unmaintained visualization library, use it as a backup if circuitsvis isn't working\n", " # # Install another version of node that makes PySvelte work way faster\n", " # !curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -; sudo apt-get install -y nodejs\n", - " # %pip install git+https://github.com/TransformerLensOrg/PySvelte.git\n", + " # %pip install git+https://github.com/neelnanda-io/PySvelte.git\n", "except:\n", " IN_COLAB = False\n", " print(\"Running as a Jupyter notebook - intended for development only!\")\n", @@ -932,7 +932,7 @@ } ], "source": [ - "%pip install git+https://github.com/TransformerLensOrg/neel-plotly.git \n", + "%pip install git+https://github.com/neelnanda-io/neel-plotly.git \n", "from neel_plotly.plot import line\n", "line([train_losses[::100], test_losses[::100]], x=np.arange(0, len(train_losses), 100), xaxis=\"Epoch\", yaxis=\"Loss\", log_y=True, title=\"Training Curve for Modular Addition\", line_labels=['train', 'test'], toggle_x=True, toggle_y=True)" ] diff --git a/demos/Head_Detector_Demo.ipynb 
b/demos/Head_Detector_Demo.ipynb index 4f8ccf5..33c9b09 100644 --- a/demos/Head_Detector_Demo.ipynb +++ b/demos/Head_Detector_Demo.ipynb @@ -145,10 +145,10 @@ "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.10->transformer-lens==0.0.0) (1.3.0)\n", "Requirement already satisfied: smmap<6,>=3.0.1 in /usr/local/lib/python3.10/dist-packages (from gitdb<5,>=4.0.1->GitPython!=3.1.29,>=1.0.0->wandb>=0.13.5->transformer-lens==0.0.0) (5.0.0)\n", "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", - "Collecting git+https://github.com/TransformerLensOrg/neel-plotly.git\n", - " Cloning https://github.com/TransformerLensOrg/neel-plotly.git to /tmp/pip-req-build-u8mujxc3\n", - " Running command git clone --filter=blob:none --quiet https://github.com/TransformerLensOrg/neel-plotly.git /tmp/pip-req-build-u8mujxc3\n", - " Resolved https://github.com/TransformerLensOrg/neel-plotly.git to commit 6dc096fdc575da978d3e56489f2347d95cd397e7\n", + "Collecting git+https://github.com/neelnanda-io/neel-plotly.git\n", + " Cloning https://github.com/neelnanda-io/neel-plotly.git to /tmp/pip-req-build-u8mujxc3\n", + " Running command git clone --filter=blob:none --quiet https://github.com/neelnanda-io/neel-plotly.git /tmp/pip-req-build-u8mujxc3\n", + " Resolved https://github.com/neelnanda-io/neel-plotly.git to commit 6dc096fdc575da978d3e56489f2347d95cd397e7\n", " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n", "Requirement already satisfied: einops in /usr/local/lib/python3.10/dist-packages (from neel-plotly==0.0.0) (0.6.1)\n", "Requirement already satisfied: numpy in /usr/local/lib/python3.10/dist-packages (from neel-plotly==0.0.0) (1.24.3)\n", @@ -318,10 +318,10 @@ "if IN_COLAB or IN_GITHUB:\n", " %pip install git+https://github.com/TransformerLensOrg/TransformerLens.git\n", " # Install Neel's personal plotting utils\n", - " %pip install git+https://github.com/TransformerLensOrg/neel-plotly.git\n", + " %pip install git+https://github.com/neelnanda-io/neel-plotly.git\n", " # Install another version of node that makes PySvelte work way faster\n", " !curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -; sudo apt-get install -y nodejs\n", - " %pip install git+https://github.com/TransformerLensOrg/PySvelte.git\n", + " %pip install git+https://github.com/neelnanda-io/PySvelte.git\n", " # Needed for PySvelte to work, v3 came out and broke things...\n", " %pip install typeguard==2.13.3\n", " %pip install typing-extensions" diff --git a/demos/Interactive_Neuroscope.ipynb b/demos/Interactive_Neuroscope.ipynb index 798d8cf..e6999f9 100644 --- a/demos/Interactive_Neuroscope.ipynb +++ b/demos/Interactive_Neuroscope.ipynb @@ -36,25 +36,23 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Running as a Jupyter notebook - intended for development only!\n", - "The autoreload extension is already loaded. 
To reload it, use:\n", - " %reload_ext autoreload\n" + "Running as a Jupyter notebook - intended for development only!\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "/var/folders/m3/z6c6rcdj1rbb2jh9vqpgvxg40000gn/T/ipykernel_99314/1752105691.py:19: DeprecationWarning: `magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n", + "/var/folders/m3/z6c6rcdj1rbb2jh9vqpgvxg40000gn/T/ipykernel_63049/1105475986.py:19: DeprecationWarning: `magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n", " ipython.magic(\"load_ext autoreload\")\n", - "/var/folders/m3/z6c6rcdj1rbb2jh9vqpgvxg40000gn/T/ipykernel_99314/1752105691.py:20: DeprecationWarning: `magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n", + "/var/folders/m3/z6c6rcdj1rbb2jh9vqpgvxg40000gn/T/ipykernel_63049/1105475986.py:20: DeprecationWarning: `magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n", " ipython.magic(\"autoreload 2\")\n" ] } @@ -83,12 +81,13 @@ "\n", "if IN_COLAB or IN_GITHUB:\n", " %pip install transformer_lens\n", - " %pip install gradio" + " %pip install gradio\n", + " %pip install datasets==2.19.1" ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -109,17 +108,26 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded pretrained model gpt2-small into HookedTransformer\n" + ] + } + ], "source": [ + "# NBVAL_IGNORE_OUTPUT\n", "model_name = \"gpt2-small\"\n", "model = HookedTransformer.from_pretrained(model_name)" ] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -145,7 +153,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -165,7 +173,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -200,7 +208,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -273,7 +281,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -349,7 +357,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -388,14 +396,14 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Running on local URL: http://127.0.0.1:7861\n" + "Running on local URL: http://127.0.0.1:7860\n" ] }, { @@ -412,7 +420,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Running on public URL: https://39d0ba09838527bb1c.gradio.live\n", + "Running on public URL: https://7a615281b36111d2e4.gradio.live\n", "\n", "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n" ] @@ -420,7 +428,7 @@ { "data": { "text/html": [ - "
" + "
" ], "text/plain": [ "" @@ -433,7 +441,7 @@ "data": { "text/plain": [] }, - "execution_count": 23, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -460,7 +468,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.8" + "version": "3.11.9" }, "orig_nbformat": 4, "vscode": { diff --git a/demos/Main_Demo.ipynb b/demos/Main_Demo.ipynb index a39d220..c0fed32 100644 --- a/demos/Main_Demo.ipynb +++ b/demos/Main_Demo.ipynb @@ -534,7 +534,7 @@ "The IOI task is the task of identifying that a sentence like \"After John and Mary went to the store, Mary gave a bottle of milk to\" continues with \" John\" rather than \" Mary\" (ie, finding the indirect object), and Redwood Research have [an excellent paper studying the underlying circuit in GPT-2 Small](https://arxiv.org/abs/2211.00593).\n", "\n", "**[Activation patching](https://dynalist.io/d/n2ZWtnoYHrU1s4vnFSAQ519J#z=qeWBvs-R-taFfcCq-S_hgMqx)** is a technique from [Kevin Meng and David Bau's excellent ROME paper](https://rome.baulab.info/). The goal is to identify which model activations are important for completing a task. We do this by setting up a **clean prompt** and a **corrupted prompt** and a **metric** for performance on the task. We then pick a specific model activation, run the model on the corrupted prompt, but then *intervene* on that activation and patch in its value when run on the clean prompt. We then apply the metric, and see how much this patch has recovered the clean performance. \n", - "(See [a more detailed demonstration of activation patching here](https://colab.research.google.com/github.com/TransformerLensOrg/TransformerLens/blob/main/demos/Exploratory_Analysis_Demo.ipynb))" + "(See [a more detailed demonstration of activation patching here](https://colab.research.google.com/github/TransformerLensOrg/TransformerLens/blob/main/demos/Exploratory_Analysis_Demo.ipynb))" ] }, { @@ -731,7 +731,7 @@ "\n", "Induction circuits are a very important circuit in generative language models, which are used to detect and continue repeated subsequences. They consist of two heads in separate layers that compose together, a **previous token head** which always attends to the previous token, and an **induction head** which attends to the token *after* an earlier copy of the current token. \n", "\n", - "To see why this is important, let's say that the model is trying to predict the next token in a news article about Michael Jordan. The token \" Michael\", in general, could be followed by many surnames. But an induction head will look from that occurence of \" Michael\" to the token after previous occurences of \" Michael\", ie \" Jordan\" and can confidently predict that that will come next." + "To see why this is important, let's say that the model is trying to predict the next token in a news article about Michael Jordan. The token \" Michael\", in general, could be followed by many surnames. But an induction head will look from that occurrence of \" Michael\" to the token after previous occurrences of \" Michael\", ie \" Jordan\" and can confidently predict that that will come next." ] }, { @@ -804,7 +804,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The induction heads will be attending from the second occurence of each token to the token *after* its first occurence, ie the token `50-1==49` places back. So by looking at the average attention paid 49 tokens back, we can identify induction heads! 
Let's define a hook to do this!\n", + "The induction heads will be attending from the second occurrence of each token to the token *after* its first occurrence, ie the token `50-1==49` places back. So by looking at the average attention paid 49 tokens back, we can identify induction heads! Let's define a hook to do this!\n", "\n", "
Technical details\n", "\n", @@ -1695,7 +1695,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If there are multiple copies of the token, we can set `mode=\"first\"` to find the first occurence's position and `mode=\"last\"` to find the last" + "If there are multiple copies of the token, we can set `mode=\"first\"` to find the first occurrence's position and `mode=\"last\"` to find the last" ] }, { @@ -1707,17 +1707,17 @@ "name": "stdout", "output_type": "stream", "text": [ - "First occurence 2\n", - "Final occurence 13\n" + "First occurrence 2\n", + "Final occurrence 13\n" ] } ], "source": [ - "print(\"First occurence\", model.get_token_position(\n", + "print(\"First occurrence\", model.get_token_position(\n", " \" cat\", \n", " \"The cat sat on the mat. The mat sat on the cat.\", \n", " mode=\"first\"))\n", - "print(\"Final occurence\", model.get_token_position(\n", + "print(\"Final occurrence\", model.get_token_position(\n", " \" cat\", \n", " \"The cat sat on the mat. The mat sat on the cat.\", \n", " mode=\"last\"))" diff --git a/demos/Othello_GPT.ipynb b/demos/Othello_GPT.ipynb index 4f56cb0..191785a 100644 --- a/demos/Othello_GPT.ipynb +++ b/demos/Othello_GPT.ipynb @@ -203,7 +203,7 @@ " # PySvelte is an unmaintained visualization library, use it as a backup if circuitsvis isn't working\n", " # # Install another version of node that makes PySvelte work way faster\n", " # !curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -; sudo apt-get install -y nodejs\n", - " # %pip install git+https://github.com/TransformerLensOrg/PySvelte.git\n", + " # %pip install git+https://github.com/neelnanda-io/PySvelte.git\n", "except:\n", " IN_COLAB = False\n", " print(\"Running as a Jupyter notebook - intended for development only!\")\n", diff --git a/demos/Patchscopes_Generation_Demo.ipynb b/demos/Patchscopes_Generation_Demo.ipynb new file mode 100644 index 0000000..49c4655 --- /dev/null +++ b/demos/Patchscopes_Generation_Demo.ipynb @@ -0,0 +1,3776 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + " \"Open\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Patchscopes & Generation with Patching\n", + "\n", + "This notebook contains a demo for Patchscopes (https://arxiv.org/pdf/2401.06102) and demonstrates how to generate multiple tokens with patching. Since there're also some applications in [Patchscopes](##Patchscopes-pipeline) that require generating multiple tokens with patching, I think it's suitable to put both of them in the same notebook. Additionally, generation with patching can be well-described using Patchscopes. Therefore, I simply implement it with the Patchscopes pipeline (see [here](##Generation-with-patching))." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup (Ignore)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Janky code to do different setup when run in a Colab notebook vs VSCode\n", + "import os\n", + "\n", + "DEBUG_MODE = False\n", + "IN_GITHUB = os.getenv(\"GITHUB_ACTIONS\") == \"true\"\n", + "try:\n", + " import google.colab\n", + "\n", + " IN_COLAB = True\n", + " print(\"Running as a Colab notebook\")\n", + "except:\n", + " IN_COLAB = False\n", + " print(\"Running as a Jupyter notebook - intended for development only!\")\n", + " from IPython import get_ipython\n", + "\n", + " ipython = get_ipython()\n", + " # Code to automatically update the HookedTransformer code as its edited without restarting the kernel\n", + " ipython.magic(\"load_ext autoreload\")\n", + " ipython.magic(\"autoreload 2\")\n", + "\n", + "if IN_COLAB or IN_GITHUB:\n", + " %pip install transformer_lens\n", + " %pip install torchtyping\n", + " # Install my janky personal plotting utils\n", + " %pip install git+https://github.com/neelnanda-io/neel-plotly.git\n", + " # Install another version of node that makes PySvelte work way faster\n", + " %pip install circuitsvis\n", + " # Needed for PySvelte to work, v3 came out and broke things...\n", + " %pip install typeguard==2.13.3\n", + "\n", + "import torch\n", + "from typing import List, Callable, Tuple, Union\n", + "from functools import partial\n", + "from jaxtyping import Float\n", + "from transformer_lens import HookedTransformer\n", + "from transformer_lens.ActivationCache import ActivationCache\n", + "import transformer_lens.utils as utils\n", + "from transformer_lens.hook_points import (\n", + " HookPoint,\n", + ") # Hooking utilities" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Helper Funcs\n", + "\n", + "A helper function to plot logit lens" + ] + }, + { + "cell_type": "code", + "execution_count": 116, + "metadata": {}, + "outputs": [], + "source": [ + "import plotly.graph_objects as go\n", + "import numpy as np\n", + "\n", + "# Parameters\n", + "num_layers = 5\n", + "seq_len = 10\n", + "\n", + "# Create a matrix of tokens for demonstration\n", + "tokens = np.array([[\"token_{}_{}\".format(i, j) for j in range(seq_len)] for i in range(num_layers)])[::-1]\n", + "values = np.random.rand(num_layers, seq_len)\n", + "orig_tokens = ['Token {}'.format(i) for i in range(seq_len)]\n", + "\n", + "def draw_logit_lens(num_layers, seq_len, orig_tokens, tokens, values):\n", + " # Create the heatmap\n", + " fig = go.Figure(data=go.Heatmap(\n", + " z=values,\n", + " x=orig_tokens,\n", + " y=['Layer {}'.format(i) for i in range(num_layers)][::-1],\n", + " colorscale='Blues',\n", + " showscale=True,\n", + " colorbar=dict(title='Value')\n", + " ))\n", + "\n", + " # Add text annotations\n", + " annotations = []\n", + " for i in range(num_layers):\n", + " for j in range(seq_len):\n", + " annotations.append(\n", + " dict(\n", + " x=j, y=i,\n", + " text=tokens[i, j],\n", + " showarrow=False,\n", + " font=dict(color='white')\n", + " )\n", + " )\n", + "\n", + " fig.update_layout(\n", + " annotations=annotations,\n", + " xaxis=dict(side='top'),\n", + " yaxis=dict(autorange='reversed'),\n", + " margin=dict(l=50, r=50, t=100, b=50),\n", + " width=1000,\n", + " height=600,\n", + " plot_bgcolor='white'\n", + " )\n", + "\n", + " # Show the plot\n", + " fig.show()\n", + "# draw_logit_lens(num_layers, seq_len, orig_tokens, tokens, values)" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "## Model Preparation" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded pretrained model gpt2-small into HookedTransformer\n" + ] + }, + { + "data": { + "text/plain": [ + "HookedTransformer(\n", + " (embed): Embed()\n", + " (hook_embed): HookPoint()\n", + " (pos_embed): PosEmbed()\n", + " (hook_pos_embed): HookPoint()\n", + " (blocks): ModuleList(\n", + " (0-11): 12 x TransformerBlock(\n", + " (ln1): LayerNormPre(\n", + " (hook_scale): HookPoint()\n", + " (hook_normalized): HookPoint()\n", + " )\n", + " (ln2): LayerNormPre(\n", + " (hook_scale): HookPoint()\n", + " (hook_normalized): HookPoint()\n", + " )\n", + " (attn): Attention(\n", + " (hook_k): HookPoint()\n", + " (hook_q): HookPoint()\n", + " (hook_v): HookPoint()\n", + " (hook_z): HookPoint()\n", + " (hook_attn_scores): HookPoint()\n", + " (hook_pattern): HookPoint()\n", + " (hook_result): HookPoint()\n", + " )\n", + " (mlp): MLP(\n", + " (hook_pre): HookPoint()\n", + " (hook_post): HookPoint()\n", + " )\n", + " (hook_attn_in): HookPoint()\n", + " (hook_q_input): HookPoint()\n", + " (hook_k_input): HookPoint()\n", + " (hook_v_input): HookPoint()\n", + " (hook_mlp_in): HookPoint()\n", + " (hook_attn_out): HookPoint()\n", + " (hook_mlp_out): HookPoint()\n", + " (hook_resid_pre): HookPoint()\n", + " (hook_resid_mid): HookPoint()\n", + " (hook_resid_post): HookPoint()\n", + " )\n", + " )\n", + " (ln_final): LayerNormPre(\n", + " (hook_scale): HookPoint()\n", + " (hook_normalized): HookPoint()\n", + " )\n", + " (unembed): Unembed()\n", + ")" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# NBVAL_IGNORE_OUTPUT\n", + "# I'm using an M2 macbook air, so I use CPU for better support\n", + "model = HookedTransformer.from_pretrained(\"gpt2-small\", device=\"cpu\")\n", + "model.eval()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Patchscopes Definition\n", + "\n", + "Here we first wirte down the formal definition decribed in the paper https://arxiv.org/pdf/2401.06102.\n", + "\n", + "The representations are:\n", + "\n", + "source: (S, i, M, l), where S is the source prompt, i is the source position, M is the source model, and l is the source layer.\n", + "\n", + "target: (T,i*,f,M*,l*), where T is the target prompt, i* is the target position, M* is the target model, l* is the target layer, and f is the mapping function that takes the original hidden states as input and output the target hidden states\n", + "\n", + "By defulat, S = T, i = i*, M = M*, l = l*, f = identity function" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Patchscopes Pipeline\n", + "\n", + "### Get hidden representation from the source model\n", + "\n", + "1. We first need to extract the source hidden states from model M at position i of layer l with prompt S. In TransformerLens, we can do this using run_with_cache.\n", + "2. Then, we map the source representation with a function f, and feed the hidden representation to the target position using a hook. 
Specifically, we focus on residual stream (resid_post), whereas you can manipulate more fine-grainedly with TransformerLens\n" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "prompts = [\"Patchscopes is a nice tool to inspect hidden representation of language model\"]\n", + "input_tokens = model.to_tokens(prompts)\n", + "clean_logits, clean_cache = model.run_with_cache(input_tokens)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "def get_source_representation(prompts: List[str], layer_id: int, model: HookedTransformer, pos_id: Union[int, List[int]]=None) -> torch.Tensor:\n", + " \"\"\"Get source hidden representation represented by (S, i, M, l)\n", + " \n", + " Args:\n", + " - prompts (List[str]): a list of source prompts\n", + " - layer_id (int): the layer id of the model\n", + " - model (HookedTransformer): the source model\n", + " - pos_id (Union[int, List[int]]): the position id(s) of the model, if None, return all positions\n", + "\n", + " Returns:\n", + " - source_rep (torch.Tensor): the source hidden representation\n", + " \"\"\"\n", + " input_tokens = model.to_tokens(prompts)\n", + " _, cache = model.run_with_cache(input_tokens)\n", + " layer_name = \"blocks.{id}.hook_resid_post\"\n", + " layer_name = layer_name.format(id=layer_id)\n", + " if pos_id is None:\n", + " return cache[layer_name][:, :, :]\n", + " else:\n", + " return cache[layer_name][:, pos_id, :]" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "source_rep = get_source_representation(\n", + " prompts=[\"Patchscopes is a nice tool to inspect hidden representation of language model\"],\n", + " layer_id=2,\n", + " model=model,\n", + " pos_id=5\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Feed the representation to the target position\n", + "\n", + "First we need to map the representation using mapping function f, and then feed the target representation to the target position represented by (T,i*,f,M*,l*)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "# here we use an identity function for demonstration purposes\n", + "def identity_function(source_rep: torch.Tensor) -> torch.Tensor:\n", + " return source_rep" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "# recall the target representation (T,i*,f,M*,l*), and we also need the hidden representation from our source model (S, i, M, l)\n", + "def feed_source_representation(source_rep: torch.Tensor, prompt: List[str], f: Callable, model: HookedTransformer, layer_id: int, pos_id: Union[int, List[int]]=None) -> ActivationCache:\n", + " \"\"\"Feed the source hidden representation to the target model\n", + " \n", + " Args:\n", + " - source_rep (torch.Tensor): the source hidden representation\n", + " - prompt (List[str]): the target prompt\n", + " - f (Callable): the mapping function\n", + " - model (HookedTransformer): the target model\n", + " - layer_id (int): the layer id of the target model\n", + " - pos_id (Union[int, List[int]]): the position id(s) of the target model, if None, return all positions\n", + " \"\"\"\n", + " mapped_rep = f(source_rep)\n", + " # similar to what we did for activation patching, we need to define a function to patch the hidden representation\n", + " def resid_ablation_hook(\n", + " 
value: Float[torch.Tensor, \"batch pos d_resid\"],\n", + " hook: HookPoint\n", + " ) -> Float[torch.Tensor, \"batch pos d_resid\"]:\n", + " # print(f\"Shape of the value tensor: {value.shape}\")\n", + " # print(f\"Shape of the hidden representation at the target position: {value[:, pos_id, :].shape}\")\n", + " value[:, pos_id, :] = mapped_rep\n", + " return value\n", + " \n", + " input_tokens = model.to_tokens(prompt)\n", + "\n", + " logits = model.run_with_hooks(\n", + " input_tokens,\n", + " return_type=\"logits\",\n", + " fwd_hooks=[(\n", + " utils.get_act_name(\"resid_post\", layer_id),\n", + " resid_ablation_hook\n", + " )]\n", + " )\n", + " \n", + " return logits" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "patched_logits = feed_source_representation(\n", + " source_rep=source_rep,\n", + " prompt=prompts,\n", + " pos_id=3,\n", + " f=identity_function,\n", + " model=model,\n", + " layer_id=2\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(tensor([[ 3.5811, 3.5322, 2.6463, ..., -4.3504, -1.7939, 3.3541]],\n", + " grad_fn=),\n", + " tensor([[ 3.2431, 3.2708, 1.9591, ..., -4.2666, -2.2141, 3.4965]],\n", + " grad_fn=))" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# NBVAL_IGNORE_OUTPUT\n", + "clean_logits[:, 5], patched_logits[:, 5]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Generation with Patching\n", + "\n", + "In the last step, we've implemented the basic version of Patchscopes where we can only run one single forward pass. Let's now unlock the power by allowing it to generate multiple tokens!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_with_patching(model: HookedTransformer, prompts: List[str], target_f: Callable, max_new_tokens: int = 50):\n", + " temp_prompts = prompts\n", + " input_tokens = model.to_tokens(temp_prompts)\n", + " for _ in range(max_new_tokens):\n", + " logits = target_f(\n", + " prompt=temp_prompts,\n", + " )\n", + " next_tok = torch.argmax(logits[:, -1, :])\n", + " input_tokens = torch.cat((input_tokens, next_tok.view(input_tokens.size(0), 1)), dim=1)\n", + " temp_prompts = model.to_string(input_tokens)\n", + "\n", + " return model.to_string(input_tokens)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "<|endoftext|>Patchscopes is a nice tool to inspect hidden representation of language model file bit file\n" + ] + } + ], + "source": [ + "prompts = [\"Patchscopes is a nice tool to inspect hidden representation of language model\"]\n", + "input_tokens = model.to_tokens(prompts)\n", + "target_f = partial(\n", + " feed_source_representation,\n", + " source_rep=source_rep,\n", + " pos_id=-1,\n", + " f=identity_function,\n", + " model=model,\n", + " layer_id=2\n", + ")\n", + "gen = generate_with_patching(model, prompts, target_f, max_new_tokens=3)\n", + "print(gen)" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Patchscopes is a nice tool to inspect hidden representation of language model.\n", + "\n", + "It is a simple tool to inspect hidden representation of language model.\n", + "\n", + "It is a simple tool to inspect hidden representation of language model.\n", + "\n", + "It is a simple tool to inspect hidden representation of language model.\n", + "\n", + "It is\n" + ] + } + ], + "source": [ + "# Original generation\n", + "print(model.generate(prompts[0], verbose=False, max_new_tokens=50, do_sample=False))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Application Examples" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Logit Lens\n", + "\n", + "For Logit Lens, the configuration is l* ← L*. Here, L* is the last layer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 104, + "metadata": {}, + "outputs": [], + "source": [ + "token_list = []\n", + "value_list = []\n", + "\n", + "def identity_function(source_rep: torch.Tensor) -> torch.Tensor:\n", + " return source_rep\n", + "\n", + "for source_layer_id in range(12):\n", + " # Prepare source representation\n", + " source_rep = get_source_representation(\n", + " prompts=[\"Patchscopes is a nice tool to inspect hidden representation of language model\"],\n", + " layer_id=source_layer_id,\n", + " model=model,\n", + " pos_id=None\n", + " )\n", + "\n", + " logits = feed_source_representation(\n", + " source_rep=source_rep,\n", + " prompt=[\"Patchscopes is a nice tool to inspect hidden representation of language model\"],\n", + " f=identity_function,\n", + " model=model,\n", + " layer_id=11\n", + " )\n", + " token_list.append([model.to_string(token_id.item()) for token_id in logits.argmax(dim=-1).squeeze()])\n", + " value_list.append([value for value in torch.max(logits.softmax(dim=-1), dim=-1)[0].detach().squeeze().numpy()])" + ] + }, + { + "cell_type": "code", + "execution_count": 109, + "metadata": {}, + "outputs": [], + "source": [ + "token_list = np.array(token_list[::-1])\n", + "value_list = np.array(value_list[::-1])" + ] + }, + { + "cell_type": "code", + "execution_count": 110, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "colorbar": { + "title": { + "text": "Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "showscale": true, + "type": "heatmap", + "x": [ + "<|endoftext|>", + "Patch", + "sc", + "opes", + " is", + " a", + " nice", + " tool", + " to", + " inspect", + " hidden", + " representation", + " of", + " language", + " model" + ], + "y": [ + "Layer 11", + "Layer 10", + "Layer 9", + "Layer 8", + "Layer 7", + "Layer 6", + "Layer 5", + "Layer 4", + "Layer 3", + "Layer 2", + "Layer 1", + "Layer 0" + ], + "z": [ + [ + 0.34442219138145447, + 0.9871702790260315, + 0.3734475076198578, + 0.9830440878868103, + 0.4042338728904724, + 0.09035539627075195, + 0.8022230863571167, + 0.5206465125083923, + 0.14175501465797424, + 0.9898471236228943, + 0.9606538414955139, + 0.9691148996353149, + 0.662227988243103, + 0.9815096855163574, + 0.9055094718933105 + ], + [ + 0.08009976148605347, + 0.99101722240448, + 0.45667293667793274, + 0.40307697653770447, + 0.49327367544174194, + 0.08549172431230545, + 0.7428992390632629, + 0.8611035943031311, + 0.1983162760734558, + 0.9246276021003723, + 0.8956946730613708, + 0.8638046383857727, + 0.8365117311477661, + 0.9618501663208008, + 0.9175702333450317 + ], + [ + 0.02691030502319336, + 0.9732530117034912, + 0.19330987334251404, + 0.381843239068985, + 0.33808818459510803, + 0.07934993505477905, + 0.3974476158618927, + 0.7191767692565918, + 0.24212224781513214, + 0.7858667373657227, + 0.866357684135437, + 0.6622256636619568, + 0.8740373849868774, + 0.947133481502533, + 0.8450764417648315 + ], + [ + 0.027061497792601585, + 0.9609430432319641, + 0.2772334814071655, + 0.20079827308654785, + 0.2932577431201935, + 0.1255684345960617, + 0.32114332914352417, + 0.6489707827568054, + 0.2919656038284302, + 
0.18173590302467346, + 0.635391891002655, + 0.5701303482055664, + 0.8785448670387268, + 0.8575655221939087, + 0.6919821500778198 + ], + [ + 0.026887305080890656, + 0.9309146404266357, + 0.44758421182632446, + 0.24046003818511963, + 0.28474941849708557, + 0.20104897022247314, + 0.5028793811798096, + 0.48273345828056335, + 0.2584459185600281, + 0.36538586020469666, + 0.20586784183979034, + 0.3072110712528229, + 0.9045845866203308, + 0.5042338371276855, + 0.4879302978515625 + ], + [ + 0.0265483595430851, + 0.9315882921218872, + 0.41395631432533264, + 0.2468952238559723, + 0.35624295473098755, + 0.21814416348934174, + 0.6175792813301086, + 0.7821283340454102, + 0.28484007716178894, + 0.3186572194099426, + 0.16824035346508026, + 0.5927833914756775, + 0.8808191418647766, + 0.5171196460723877, + 0.2029583901166916 + ], + [ + 0.026423994451761246, + 0.898944079875946, + 0.32038140296936035, + 0.44839850068092346, + 0.2796024978160858, + 0.20586445927619934, + 0.6313580274581909, + 0.87591552734375, + 0.18971839547157288, + 0.3038368225097656, + 0.36893585324287415, + 0.5965255498886108, + 0.7505314946174622, + 0.5989011526107788, + 0.10610682517290115 + ], + [ + 0.026437079533934593, + 0.6845366358757019, + 0.3912840485572815, + 0.37950050830841064, + 0.5224342346191406, + 0.2038283497095108, + 0.3475077748298645, + 0.647609293460846, + 0.11305152624845505, + 0.4017726182937622, + 0.4405157268047333, + 0.533568799495697, + 0.5206188559532166, + 0.2670389711856842, + 0.08740855008363724 + ], + [ + 0.026673221960663795, + 0.36045604944229126, + 0.27727553248405457, + 0.4515568017959595, + 0.5681671500205994, + 0.36901071667671204, + 0.5300043821334839, + 0.494934618473053, + 0.3656132221221924, + 0.40456005930900574, + 0.2656775712966919, + 0.2756248712539673, + 0.517121434211731, + 0.3028433322906494, + 0.09847757965326309 + ], + [ + 0.026949577033519745, + 0.3112040162086487, + 0.22643150389194489, + 0.7095355987548828, + 0.5966493487358093, + 0.4613777995109558, + 0.8436885476112366, + 0.4194002151489258, + 0.22365105152130127, + 0.4558623731136322, + 0.32150164246559143, + 0.4018287658691406, + 0.8275868892669678, + 0.3780366778373718, + 0.19973652064800262 + ], + [ + 0.027445374056696892, + 0.3283821940422058, + 0.5192154049873352, + 0.1790430098772049, + 0.6429017782211304, + 0.3577035665512085, + 0.6037949919700623, + 0.5884966254234314, + 0.18566730618476868, + 0.3142710030078888, + 0.15301460027694702, + 0.3585647940635681, + 0.4576294720172882, + 0.1486930102109909, + 0.13506801426410675 + ], + [ + 0.062298569828271866, + 0.24093002080917358, + 0.16585318744182587, + 0.16210544109344482, + 0.449150949716568, + 0.042253680527210236, + 0.11057071387767792, + 0.3447357416152954, + 0.08157400786876678, + 0.13642098009586334, + 0.07241284847259521, + 0.25115686655044556, + 0.084745854139328, + 0.0951341837644577, + 0.1267273873090744 + ] + ] + } + ], + "layout": { + "annotations": [ + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "\n", + "x": 0, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Patch", + "x": 1, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "rawl", + "x": 2, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "opes", + "x": 3, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " not", + "x": 4, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " new", + "x": 5, + "y": 0 + }, 
+ { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " nice", + "x": 6, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "tips", + "x": 7, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " get", + "x": 8, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " inspect", + "x": 9, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " hidden", + "x": 10, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " representation", + "x": 11, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 12, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " language", + "x": 13, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " model", + "x": 14, + "y": 0 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "\n", + "x": 0, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Patch", + "x": 1, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "urry", + "x": 2, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Operator", + "x": 3, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " not", + "x": 4, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " new", + "x": 5, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " nice", + "x": 6, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "tips", + "x": 7, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " get", + "x": 8, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " inspect", + "x": 9, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " hidden", + "x": 10, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " representation", + "x": 11, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 12, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " language", + "x": 13, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " model", + "x": 14, + "y": 1 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ",", + "x": 0, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Patch", + "x": 1, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "rawl", + "x": 2, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Operator", + "x": 3, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " not", + "x": 4, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " new", + "x": 5, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " nice", + "x": 6, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "tips", + "x": 7, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " keep", + "x": 8, + "y": 2 + }, + { + "font": { + "color": "white" + }, + 
"showarrow": false, + "text": " inspect", + "x": 9, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " hidden", + "x": 10, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " representation", + "x": 11, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 12, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " language", + "x": 13, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " model", + "x": 14, + "y": 2 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ",", + "x": 0, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Patch", + "x": 1, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "atch", + "x": 2, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ":", + "x": 3, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " currently", + "x": 4, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " very", + "x": 5, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " thing", + "x": 6, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "tips", + "x": 7, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " keep", + "x": 8, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 9, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " hidden", + "x": 10, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " representation", + "x": 11, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 12, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " language", + "x": 13, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " model", + "x": 14, + "y": 3 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ",", + "x": 0, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Patch", + "x": 1, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "atch", + "x": 2, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ":", + "x": 3, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " currently", + "x": 4, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " unique", + "x": 5, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " little", + "x": 6, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 7, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " keep", + "x": 8, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " your", + "x": 9, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " hidden", + "x": 10, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " of", + "x": 11, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 
12, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " language", + "x": 13, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " model", + "x": 14, + "y": 4 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ",", + "x": 0, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Patch", + "x": 1, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "reens", + "x": 2, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ":", + "x": 3, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " currently", + "x": 4, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " unique", + "x": 5, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " little", + "x": 6, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 7, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " keep", + "x": 8, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " your", + "x": 9, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " gem", + "x": 10, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " of", + "x": 11, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 12, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " language", + "x": 13, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " model", + "x": 14, + "y": 5 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ",", + "x": 0, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Patch", + "x": 1, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "ree", + "x": 2, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ":", + "x": 3, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " also", + "x": 4, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " unique", + "x": 5, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " little", + "x": 6, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 7, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " keep", + "x": 8, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 9, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " gems", + "x": 10, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " of", + "x": 11, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 12, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " language", + "x": 13, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " (", + "x": 14, + "y": 6 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ",", + "x": 0, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + 
"text": " Patch", + "x": 1, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "ream", + "x": 2, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ":", + "x": 3, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " currently", + "x": 4, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " powerful", + "x": 5, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " little", + "x": 6, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 7, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " keep", + "x": 8, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " your", + "x": 9, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " gems", + "x": 10, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " of", + "x": 11, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 12, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " language", + "x": 13, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " (", + "x": 14, + "y": 7 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ",", + "x": 0, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "work", + "x": 1, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "ream", + "x": 2, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "¶", + "x": 3, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " currently", + "x": 4, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " powerful", + "x": 5, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 6, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "kit", + "x": 7, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " help", + "x": 8, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " your", + "x": 9, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " objects", + "x": 10, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " of", + "x": 11, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " objects", + "x": 12, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " objects", + "x": 13, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " data", + "x": 14, + "y": 8 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ",", + "x": 0, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "work", + "x": 1, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "ream", + "x": 2, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "¶", + "x": 3, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " a", + "x": 4, + "y": 9 + }, + { + "font": { + "color": "white" + }, + 
"showarrow": false, + "text": " tool", + "x": 5, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 6, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 7, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " help", + "x": 8, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " your", + "x": 9, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " objects", + "x": 10, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " of", + "x": 11, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " objects", + "x": 12, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " strings", + "x": 13, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " variables", + "x": 14, + "y": 9 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ",", + "x": 0, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Notes", + "x": 1, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "rew", + "x": 2, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "¶", + "x": 3, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " a", + "x": 4, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 5, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 6, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " for", + "x": 7, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " help", + "x": 8, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " your", + "x": 9, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " items", + "x": 10, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " of", + "x": 11, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " objects", + "x": 12, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " objects", + "x": 13, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " objects", + "x": 14, + "y": 10 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "\n", + "x": 0, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " Notes", + "x": 1, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "rew", + "x": 2, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": "\n", + "x": 3, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " a", + "x": 4, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 5, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " tool", + "x": 6, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " for", + "x": 7, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " help", + "x": 8, + "y": 11 + 
}, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 9, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " files", + "x": 10, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " of", + "x": 11, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " the", + "x": 12, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": " features", + "x": 13, + "y": 11 + }, + { + "font": { + "color": "white" + }, + "showarrow": false, + "text": ".", + "x": 14, + "y": 11 + } + ], + "height": 600, + "margin": { + "b": 50, + "l": 50, + "r": 50, + "t": 100 + }, + "plot_bgcolor": "white", + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + 
"histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + 
"outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + }, + "width": 1000, + "xaxis": { + "side": "top" + }, + "yaxis": { + "autorange": "reversed" + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "num_layers = 12\n", + "seq_len = len(token_list[0])\n", + "orig_tokens = 
[model.to_string(token_id) for token_id in model.to_tokens([\"Patchscopes is a nice tool to inspect hidden representation of language model\"])[0]]\n", + "draw_logit_lens(num_layers, seq_len, orig_tokens, token_list, value_list)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Entity Description\n", + "\n", + "Entity description tries to answer \"how LLMs resolve entity mentions across multiple layers. Concretely, given a subject entity name, such as “the summer Olympics of 1996”, how does the model contextualize the input tokens of the entity and at which layer is it fully resolved?\"\n", + "\n", + "The configuration is l* ← l, i* ← m, and it requires generating multiple tokens. Here m refers to the last position (the position of x)" + ] + }, + { + "cell_type": "code", + "execution_count": 111, + "metadata": {}, + "outputs": [], + "source": [ + " # Prepare source representation\n", + "source_rep = get_source_representation(\n", + " prompts=[\"Diana, Princess of Wales\"],\n", + " layer_id=11,\n", + " model=model,\n", + " pos_id=-1\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 115, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generation by patching layer 0:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The \"The \"The \"The \"The \"The \"The \"The \"The\n", + "==============================\n", + "\n", + "Generation by patching layer 1:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The \"The \"The \"The \"The \"The \"The \"The \"The\n", + "==============================\n", + "\n", + "Generation by patching layer 2:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The\n", + "The\n", + "\n", + "\n", + "The\n", + "The\n", + "The\n", + "\n", + "\n", + "The\n", + "The\n", + "==============================\n", + "\n", + "Generation by patching layer 3:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The\n", + "\n", + "\n", + "The\n", + "\n", + "\n", + "The\n", + "\n", + "\n", + "The\n", + "\n", + "\n", + "The\n", + "==============================\n", + "\n", + "Generation by patching layer 4:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The United States\n", + "\n", + "\n", + "The United States\n", + "\n", + "\n", + "The United States\n", + "\n", + "\n", + "==============================\n", + "\n", + "Generation by patching layer 5:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The United States\n", + "\n", + "\n", + "The United States\n", + "\n", + "\n", + "The United States\n", + "\n", + "\n", + "==============================\n", + "\n", + "Generation by 
patching layer 6:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The United States is the world's most popular and the world's most beautiful.\n", + "\n", + "==============================\n", + "\n", + "Generation by patching layer 7:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The United States is the world's most popular and most beautiful country.\n", + "\n", + "\n", + "\n", + "==============================\n", + "\n", + "Generation by patching layer 8:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The United States is the world's largest exporter of the world's most expensive and\n", + "==============================\n", + "\n", + "Generation by patching layer 9:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The first time I saw the film, I was in the middle of a meeting with\n", + "==============================\n", + "\n", + "Generation by patching layer 10:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\n", + "\n", + "\n", + "The world's most famous actor, actor and producer, Leonardo DiCaprio, has\n", + "==============================\n", + "\n", + "Generation by patching layer 11:\n", + "<|endoftext|>Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x, and the world's largest consumer electronics company, Samsung Electronics Co., Ltd.\n", + "\n", + "\n", + "The\n", + "==============================\n", + "\n" + ] + } + ], + "source": [ + "target_prompt = [\"Syria: Country in the Middle East, Leonardo DiCaprio: American actor, Samsung: South Korean multinational major appliance and consumer electronics corporation, x\"]\n", + "# need to calcualte an absolute position, instead of a relative position\n", + "last_pos_id = len(model.to_tokens(target_prompt)[0]) - 1\n", + "# we need to define the function that takes the generation as input\n", + "for target_layer_id in range(12):\n", + " target_f = partial(\n", + " feed_source_representation,\n", + " source_rep=source_rep,\n", + " pos_id=last_pos_id,\n", + " f=identity_function,\n", + " model=model,\n", + " layer_id=target_layer_id\n", + " )\n", + " gen = generate_with_patching(model, target_prompt, target_f, max_new_tokens=20)\n", + " print(f\"Generation by patching layer {target_layer_id}:\\n{gen}\\n{'='*30}\\n\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, maybe the early layers of gpt2-small are doing something related to entity resolution, whereas the late layers are apparently not(?)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Zero-Shot Feature Extraction\n", + "\n", + "Zero-shot Feature Extraction \"Consider factual and com- monsense knowledge represented as triplets (σ,ρ,ω) of a 
subject (e.g., “United States”), a relation (e.g., “largest city of”), and an object (e.g.,\n", + "“New York City”). We investigate to what extent the object ω can be extracted from the last token representation of the subject σ in an arbitrary input context.\"\n", + "\n", + "The configuration is l∗ ← j′ ∈ [1,...,L∗], i∗ ← m, T ← relation verbalization followed by x" + ] + }, + { + "cell_type": "code", + "execution_count": 359, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Co-founder of company Apple, Steve Jobs, has said that Apple\\'s iPhone 6 and 6 Plus are \"the most important phones'" + ] + }, + "execution_count": 359, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# for a triplet (company Apple, co-founder of, Steve Jobs), we need to first make sure that the object is in the continuation\n", + "source_prompt = \"Co-founder of company Apple\"\n", + "model.generate(source_prompt, verbose=False, max_new_tokens=20, do_sample=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 366, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "<|endoftext|>Co-founder of x, co-founder of Google x, co-founder of Facebook x, co-founder of Twitter\n", + "<|endoftext|>Co-founder of x, co-founder of the company, and co-founder of the company, and co-founder\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to hide\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x has been accused of being a \"fraud\" by the US government.\n", + "\n", + "\n", + "The former\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of xApple, who has been working on the iPhone since 2011, has been working on the iPhone since 2011\n", + "<|endoftext|>Co-founder of x, co-founder of Google x, co-founder of Facebook x, co-founder of Twitter\n", + "<|endoftext|>Co-founder of x, co-founder of Google, co-founder of Facebook, co-founder of Twitter, co\n", + "<|endoftext|>Co-founder of x, co-founder of x, co-founder of x, co-founder of x, co\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes mobile apps for the iPhone, iPad and iPod touch, says he's 
been\n", + "<|endoftext|>Co-founder of x, a company that makes a lot of things, has been accused of sexual harassment by a former employee\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of xApple, who has been working on the iPhone since 2011, has been working on the iPhone since 2011\n", + "<|endoftext|>Co-founder of x, co-founder of Google x, co-founder of Facebook x, co-founder of Twitter\n", + "<|endoftext|>Co-founder of x, co-founder of Google x, co-founder of Facebook x, co-founder of Twitter\n", + "<|endoftext|>Co-founder of x, co-founder of Google, co-founder of Facebook, co-founder of Twitter, co\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes software for the web, has been accused of using a \"secret\" code\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of x Apple, Steve Jobs, has been accused of being a \"fraud\" by a former employee who\n", + "<|endoftext|>Co-founder of x, co-founder of Google x, co-founder of Facebook x, co-founder of Twitter\n", + "<|endoftext|>Co-founder of x, co-founder of the company, and co-founder of the company, are the first people\n", + "<|endoftext|>Co-founder of x, co-founder of the company, and co-founder of the company, and co-founder\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes software for the iPhone, has been arrested in the US.\n", + "\n", + "\n", + "\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of xInsurance, a company that provides insurance for people with disabilities, said he's been in touch with\n", + "<|endoftext|>Co-founder of x, CEO Tim Cook, and co-founder of Facebook, 
Mark Zuckerberg, have been named to the\n", + "<|endoftext|>Co-founder of x, co-founder of the company Apple, and co-founder of the company Apple, and co\n", + "<|endoftext|>Co-founder of x, who has been a vocal critic of the company's recent decision to cut its workforce, has been\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to hide\n", + "<|endoftext|>Co-founder of x, a company that makes software for the iPhone, has been arrested in the US.\n", + "\n", + "\n", + "\n", + "<|endoftext|>Co-founder of x, a company that makes software for the web, has been arrested in the US.\n", + "\n", + "\n", + "\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of xInsurance, a company that provides insurance for people with disabilities, said he's been in touch with\n", + "<|endoftext|>Co-founder of x, CEO Tim Cook, and co-founder of Facebook x, Mark Zuckerberg, are among the most\n", + "<|endoftext|>Co-founder of x, who has been a vocal critic of the company's iPhone 6 and iPhone 6 Plus, has been\n", + "<|endoftext|>Co-founder of x, who has been a vocal critic of the company's recent decision to cut its workforce, has been\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes software for the iPhone, has been arrested in the US.\n", + "\n", + "\n", + "\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of x Watch, a company that helps people with disabilities, says he's been working on a new app for\n", + "<|endoftext|>Co-founder of x, CEO Tim Cook, and co-founder Steve Jobs.\n", + "\n", + "\n", + "The company's new CEO\n", + "<|endoftext|>Co-founder of x, who has been a vocal critic of the company's recent decision to cut its workforce by half,\n", + "<|endoftext|>Co-founder of x, co-founder of the company, and co-founder of the company, and co-founder\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been named the world's most valuable person by Forbes.\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of 
using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes software for the iPhone, has been arrested in the US.\n", + "\n", + "\n", + "\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of x Watch, a company that helps people with disabilities, says he's been working on a new app for\n", + "<|endoftext|>Co-founder of x, the company's new iPhone, is expected to be unveiled in the coming weeks.\n", + "\n", + "\n", + "\n", + "<|endoftext|>Co-founder of x, co-founder of the company, and co-founder of the company, and co-founder\n", + "<|endoftext|>Co-founder of x, co-founder of the company, and co-founder of the company, and co-founder\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been named the world's most valuable person by Forbes.\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been arrested in the US for allegedly selling a fake iPhone\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x's, who has been working on the project for over a year, has been working on the project\n", + "<|endoftext|>Co-founder of xInsurance, a company that provides insurance for people with disabilities, said he's been in touch with\n", + "<|endoftext|>Co-founder of x, Apple x, Apple x, Apple x, Apple x, Apple x, Apple x, Apple\n", + "<|endoftext|>Co-founder of x, who is now the CEO of Apple, has been named the new CEO of the company.\n", + "\n", + "<|endoftext|>Co-founder of x, co-founder of the company, and co-founder of the company's new product, the\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been named the world's most valuable person by Forbes.\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"bot\" to make a\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the iPhone, has been arrested in the US.\n", + "\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + 
"<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of xbaum.com, who has been a member of the XBAY community for over a decade,\n", + "<|endoftext|>Co-founder of x, Apple x, Apple x, Apple x, Apple x, Apple x, Apple x, Apple\n", + "<|endoftext|>Co-founder of x, who is now the CEO of Apple, has been named the new CEO of the company.\n", + "\n", + "<|endoftext|>Co-founder of x, co-founder of the company, and co-founder of the company, are both on the\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been named the world's most valuable person by Forbes.\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"bot\" to make a\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"bot\" to make a\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been arrested in the US for allegedly selling a fake iPhone\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x Inc. and co-founder of X.com, Mark Zuckerberg, has been accused of using his\n", + "<|endoftext|>Co-founder of x, Apple x, and Apple x.\n", + "Apple x, Apple x, and Apple x.\n", + "\n", + "<|endoftext|>Co-founder of x, a guest Jan 25th, 2016 1,929 Never a guest1,929Never\n", + "<|endoftext|>Co-founder of x, co-founder of the company, and co-founder of the company, are both on the\n", + "<|endoftext|>Co-founder of x, a company that makes the iPhone, has been named the new CEO of Apple.\n", + "\n", + "\n", + "\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"bot\" to make a\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"bot\" to make a\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been accused of using a \"fake\" name to sell\n", + "<|endoftext|>Co-founder of x, a company that makes smart phones, has been arrested in the US for allegedly stealing $1 million\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n", + "<|endoftext|>Co-founder of x Watch, a company that helps people with disabilities, says he's been working on a new app for\n", + "<|endoftext|>Co-founder of x, the iPhone, the iPhone, the iPhone, the iPhone, the iPhone, the iPhone, the\n", + "<|endoftext|>Co-founder of x, the new, the new, the new, the new, the new, the new, the\n", + "<|endoftext|>Co-founder of x, the company, the 
company, the company, the company, the company, the company, the\n", + "<|endoftext|>Co-founder of x, the company, the company, the company, the company, the company, the company, the\n", + "<|endoftext|>Co-founder of x, the company has announced that the company has released the company has released the company has released the company\n", + "<|endoftext|>Co-founder of x, the company, the company, the company, the company, the company, the company, the\n", + "<|endoftext|>Co-founder of x, the company, said the company is now offering a new product, but the company has now announced\n", + "<|endoftext|>Co-founder of x, the company that has been working on the iPhone, said the company has been working on the iPhone\n", + "<|endoftext|>Co-founder of x, the company that created the iPhone, said the company is now working on a new product, but\n", + "<|endoftext|>Co-founder of x, a company that makes the iPhone, said that the company is working on a new product that will\n", + "<|endoftext|>Co-founder of x, a company that makes the world's most popular mobile phone, has been arrested in the US.\n", + "<|endoftext|>Co-founder of x, a startup that helps people build apps for the web, has been arrested for allegedly stealing $1\n" + ] + } + ], + "source": [ + "# Still need an aboslute position\n", + "last_pos_id = len(model.to_tokens([\"Co-founder of x\"])[0]) - 1\n", + "target_prompt = [\"Co-founder of x\"]\n", + "\n", + "# Check all the combinations, you'll see that the model is able to generate \"Steve Jobs\" in several continuations\n", + "for source_layer_id in range(12):\n", + " # Prepare source representation, here we can use relative position\n", + " source_rep = get_source_representation(\n", + " prompts=[\"Co-founder of company Apple\"],\n", + " layer_id=source_layer_id,\n", + " model=model,\n", + " pos_id=-1\n", + " )\n", + " for target_layer_id in range(12):\n", + " target_f = partial(\n", + " feed_source_representation,\n", + " source_rep=source_rep,\n", + " prompt=target_prompt,\n", + " f=identity_function,\n", + " model=model,\n", + " pos_id=last_pos_id,\n", + " layer_id=target_layer_id\n", + " )\n", + " gen = generate_with_patching(model, target_prompt, target_f, max_new_tokens=20)\n", + " print(gen)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "mechinterp", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.19" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demos/Qwen.ipynb b/demos/Qwen.ipynb index bcde85d..fba5144 100644 --- a/demos/Qwen.ipynb +++ b/demos/Qwen.ipynb @@ -117,7 +117,7 @@ " # PySvelte is an unmaintained visualization library, use it as a backup if circuitsvis isn't working\n", " # # Install another version of node that makes PySvelte work way faster\n", " # !curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -; sudo apt-get install -y nodejs\n", - " # %pip install git+https://github.com/TransformerLensOrg/PySvelte.git\n", + " # %pip install git+https://github.com/neelnanda-io/PySvelte.git\n", "except:\n", " IN_COLAB = False\n", " print(\"Running as a Jupyter notebook - intended for development only!\")\n", diff --git a/demos/SVD_Interpreter_Demo.ipynb b/demos/SVD_Interpreter_Demo.ipynb index a1669de..0f1c380 100644 --- a/demos/SVD_Interpreter_Demo.ipynb +++ 
b/demos/SVD_Interpreter_Demo.ipynb @@ -71,10 +71,10 @@ " print(\"Running as a Colab notebook\")\n", " %pip install git+https://github.com/JayBaileyCS/TransformerLens.git # TODO: Change!\n", " # Install Neel's personal plotting utils\n", - " %pip install git+https://github.com/TransformerLensOrg/neel-plotly.git\n", + " %pip install git+https://github.com/neelnanda-io/neel-plotly.git\n", " # Install another version of node that makes PySvelte work way faster\n", " !curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -; sudo apt-get install -y nodejs\n", - " %pip install git+https://github.com/TransformerLensOrg/PySvelte.git\n", + " %pip install git+https://github.com/neelnanda-io/PySvelte.git\n", " # Needed for PySvelte to work, v3 came out and broke things...\n", " %pip install typeguard==2.13.3\n", " %pip install typing-extensions\n", diff --git a/demos/Santa_Coder.ipynb b/demos/Santa_Coder.ipynb index 936d2ff..0c95abd 100644 --- a/demos/Santa_Coder.ipynb +++ b/demos/Santa_Coder.ipynb @@ -40,7 +40,7 @@ " # PySvelte is an unmaintained visualization library, use it as a backup if circuitsvis isn't working\n", " # # Install another version of node that makes PySvelte work way faster\n", " # !curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -; sudo apt-get install -y nodejs\n", - " # %pip install git+https://github.com/TransformerLensOrg/PySvelte.git\n", + " # %pip install git+https://github.com/neelnanda-io/PySvelte.git\n", "except:\n", " IN_COLAB = False\n", " print(\"Running as a Jupyter notebook - intended for development only!\")\n", diff --git a/demos/T5.ipynb b/demos/T5.ipynb new file mode 100644 index 0000000..363073a --- /dev/null +++ b/demos/T5.ipynb @@ -0,0 +1,724 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running as a Jupyter notebook - intended for development only!\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/tmp/ipykernel_1686188/569054096.py:18: DeprecationWarning:\n", + "\n", + "`magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n", + "\n", + "/tmp/ipykernel_1686188/569054096.py:19: DeprecationWarning:\n", + "\n", + "`magic(...)` is deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s).\n", + "\n" + ] + } + ], + "source": [ + "# Janky code to do different setup when run in a Colab notebook vs VSCode\n", + "import os\n", + "\n", + "DEBUG_MODE = False\n", + "IN_GITHUB = os.getenv(\"GITHUB_ACTIONS\") == \"true\"\n", + "try:\n", + " import google.colab\n", + "\n", + " IN_COLAB = True\n", + " print(\"Running as a Colab notebook\")\n", + "except:\n", + " IN_COLAB = False\n", + " print(\"Running as a Jupyter notebook - intended for development only!\")\n", + " from IPython import get_ipython\n", + "\n", + " ipython = get_ipython()\n", + " # Code to automatically update the HookedTransformer code as its edited without restarting the kernel\n", + " ipython.magic(\"load_ext autoreload\")\n", + " ipython.magic(\"autoreload 2\")\n", + "\n", + "if IN_COLAB or IN_GITHUB:\n", + " %pip install transformer_lens\n", + " %pip install torchtyping\n", + " # Install my janky personal plotting utils\n", + " %pip install git+https://github.com/TransformerLensOrg/neel-plotly.git\n", + " # Install another version of node that makes PySvelte work way faster\n", + " %pip install circuitsvis\n", + " # Needed for 
PySvelte to work, v3 came out and broke things...\n", + " %pip install typeguard==2.13.3" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Plotly needs a different renderer for VSCode/Notebooks vs Colab argh\n", + "import plotly.io as pio\n", + "\n", + "if IN_COLAB or not DEBUG_MODE:\n", + " # Thanks to annoying rendering issues, Plotly graphics will either show up in colab OR Vscode depending on the renderer - this is bad for developing demos! Thus creating a debug mode.\n", + " pio.renderers.default = \"colab\"\n", + "else:\n", + " pio.renderers.default = \"notebook_connected\"" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/dontsov/.local/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning:\n", + "\n", + "IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + "\n", + "/usr/lib/python3/dist-packages/requests/__init__.py:89: RequestsDependencyWarning:\n", + "\n", + "urllib3 (2.2.1) or chardet (3.0.4) doesn't match a supported version!\n", + "\n", + "WARNING:root:Support for T5 in TransformerLens is currently experimental, until such a time when it has feature parity with HookedTransformer and has been tested on real research tasks. Until then, backward compatibility is not guaranteed. Please see the docs for information on the limitations of the current implementation.\n", + "If using T5 for interpretability research, keep in mind that T5 has some significant architectural differences to GPT. The major one is that T5 is an Encoder-Decoder modelAlso, it uses relative positional embeddings, different types of Attention (without bias) and LayerNorm\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Moving model to device: cuda\n", + "Loaded pretrained model t5-small into HookedTransformer\n" + ] + } + ], + "source": [ + "# Imports\n", + "import torch\n", + "\n", + "from transformers import AutoTokenizer\n", + "from transformer_lens import HookedEncoderDecoder\n", + "\n", + "model_name = \"t5-small\"\n", + "model = HookedEncoderDecoder.from_pretrained(model_name)\n", + "tokenizer = AutoTokenizer.from_pretrained(model_name)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.set_grad_enabled(False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## basic sanity check - model generates smth" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "generated token: \"Bonjour\", token id: 21845\n", + "generated token: \",\", token id: 6\n", + "generated token: \"comment\", token id: 1670\n", + "generated token: \"\", token id: 3\n", + "generated token: \"êtes\", token id: 6738\n", + "generated token: \"-\", token id: 18\n", + "generated token: \"vous\", token id: 3249\n", + "generated token: \"?\", token id: 58\n", + "generated token: \"\", token id: 1\n", + "translate English to French: Hello, how are you? \n", + " Bonjour, comment êtes-vous?\n" + ] + } + ], + "source": [ + "prompt = \"translate English to French: Hello, how are you? 
\"\n", + "inputs = tokenizer(prompt, return_tensors=\"pt\")\n", + "input_ids = inputs[\"input_ids\"]\n", + "attention_mask = inputs[\"attention_mask\"]\n", + "decoder_input_ids = torch.tensor([[model.cfg.decoder_start_token_id]]).to(input_ids.device)\n", + "\n", + "\n", + "while True:\n", + " logits = model.forward(input=input_ids, one_zero_attention_mask=attention_mask, decoder_input=decoder_input_ids)\n", + " # logits.shape == (batch_size (1), predicted_pos, vocab_size)\n", + "\n", + " token_idx = torch.argmax(logits[0, -1, :]).item()\n", + " print(\"generated token: \\\"\", tokenizer.decode(token_idx), \"\\\", token id: \", token_idx, sep=\"\")\n", + "\n", + " # append token to decoder_input_ids\n", + " decoder_input_ids = torch.cat([decoder_input_ids, torch.tensor([[token_idx]]).to(input_ids.device)], dim=-1)\n", + "\n", + " # break if End-Of-Sequence token generated\n", + " if token_idx == tokenizer.eos_token_id:\n", + " break\n", + "\n", + "print(prompt, \"\\n\", tokenizer.decode(decoder_input_ids[0], skip_special_tokens=True))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### visualise encoder patterns" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import circuitsvis as cv\n", + "# Testing that the library works\n", + "cv.examples.hello(\"Neel\")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "prompt = \"translate English to French: Hello, how are you? \"\n", + "inputs = tokenizer(prompt, return_tensors=\"pt\")\n", + "input_ids = inputs[\"input_ids\"]\n", + "attention_mask = inputs[\"attention_mask\"]\n", + "\n", + "\n", + "logits,cache = model.run_with_cache(input=input_ids, one_zero_attention_mask=attention_mask, decoder_input=decoder_input_ids, remove_batch_dim=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "hook_embed\n", + "encoder.0.hook_resid_pre\n", + "encoder.0.ln1.hook_scale\n", + "encoder.0.ln1.hook_normalized\n", + "encoder.0.attn.hook_q\n", + "encoder.0.attn.hook_k\n", + "encoder.0.attn.hook_v\n", + "encoder.0.attn.hook_attn_scores\n", + "encoder.0.attn.hook_pattern\n", + "encoder.0.attn.hook_z\n", + "encoder.0.hook_attn_out\n", + "encoder.0.hook_resid_mid\n", + "encoder.0.ln2.hook_scale\n", + "encoder.0.ln2.hook_normalized\n", + "encoder.0.mlp.hook_pre\n", + "encoder.0.mlp.hook_post\n", + "encoder.0.hook_mlp_out\n", + "encoder.0.hook_resid_post\n", + "encoder.1.hook_resid_pre\n", + "encoder.1.ln1.hook_scale\n", + "encoder.1.ln1.hook_normalized\n", + "encoder.1.attn.hook_q\n", + "encoder.1.attn.hook_k\n", + "encoder.1.attn.hook_v\n", + "encoder.1.attn.hook_attn_scores\n", + "encoder.1.attn.hook_pattern\n", + "encoder.1.attn.hook_z\n", + "encoder.1.hook_attn_out\n", + "encoder.1.hook_resid_mid\n", + "encoder.1.ln2.hook_scale\n", + "encoder.1.ln2.hook_normalized\n", + "encoder.1.mlp.hook_pre\n", + "encoder.1.mlp.hook_post\n", + "encoder.1.hook_mlp_out\n", + "encoder.1.hook_resid_post\n", + "encoder.2.hook_resid_pre\n", + "encoder.2.ln1.hook_scale\n", + "encoder.2.ln1.hook_normalized\n", + "encoder.2.attn.hook_q\n", + "encoder.2.attn.hook_k\n", + "encoder.2.attn.hook_v\n", + "encoder.2.attn.hook_attn_scores\n", + "encoder.2.attn.hook_pattern\n", + "encoder.2.attn.hook_z\n", + "encoder.2.hook_attn_out\n", + "encoder.2.hook_resid_mid\n", + "encoder.2.ln2.hook_scale\n", + "encoder.2.ln2.hook_normalized\n", + "encoder.2.mlp.hook_pre\n", + "encoder.2.mlp.hook_post\n", + "encoder.2.hook_mlp_out\n", + "encoder.2.hook_resid_post\n", + "encoder.3.hook_resid_pre\n", + "encoder.3.ln1.hook_scale\n", + "encoder.3.ln1.hook_normalized\n", + "encoder.3.attn.hook_q\n", + "encoder.3.attn.hook_k\n", + "encoder.3.attn.hook_v\n", + "encoder.3.attn.hook_attn_scores\n", + "encoder.3.attn.hook_pattern\n", + "encoder.3.attn.hook_z\n", + "encoder.3.hook_attn_out\n", + "encoder.3.hook_resid_mid\n", + "encoder.3.ln2.hook_scale\n", + "encoder.3.ln2.hook_normalized\n", + "encoder.3.mlp.hook_pre\n", + "encoder.3.mlp.hook_post\n", + "encoder.3.hook_mlp_out\n", + "encoder.3.hook_resid_post\n", + "encoder.4.hook_resid_pre\n", + "encoder.4.ln1.hook_scale\n", + "encoder.4.ln1.hook_normalized\n", + "encoder.4.attn.hook_q\n", + "encoder.4.attn.hook_k\n", + "encoder.4.attn.hook_v\n", + "encoder.4.attn.hook_attn_scores\n", + "encoder.4.attn.hook_pattern\n", + "encoder.4.attn.hook_z\n", + "encoder.4.hook_attn_out\n", + "encoder.4.hook_resid_mid\n", + "encoder.4.ln2.hook_scale\n", + "encoder.4.ln2.hook_normalized\n", + 
"encoder.4.mlp.hook_pre\n", + "encoder.4.mlp.hook_post\n", + "encoder.4.hook_mlp_out\n", + "encoder.4.hook_resid_post\n", + "encoder.5.hook_resid_pre\n", + "encoder.5.ln1.hook_scale\n", + "encoder.5.ln1.hook_normalized\n", + "encoder.5.attn.hook_q\n", + "encoder.5.attn.hook_k\n", + "encoder.5.attn.hook_v\n", + "encoder.5.attn.hook_attn_scores\n", + "encoder.5.attn.hook_pattern\n", + "encoder.5.attn.hook_z\n", + "encoder.5.hook_attn_out\n", + "encoder.5.hook_resid_mid\n", + "encoder.5.ln2.hook_scale\n", + "encoder.5.ln2.hook_normalized\n", + "encoder.5.mlp.hook_pre\n", + "encoder.5.mlp.hook_post\n", + "encoder.5.hook_mlp_out\n", + "encoder.5.hook_resid_post\n", + "encoder_final_ln.hook_scale\n", + "encoder_final_ln.hook_normalized\n", + "decoder.0.hook_resid_pre\n", + "decoder.0.ln1.hook_scale\n", + "decoder.0.ln1.hook_normalized\n", + "decoder.0.attn.hook_q\n", + "decoder.0.attn.hook_k\n", + "decoder.0.attn.hook_v\n", + "decoder.0.attn.hook_attn_scores\n", + "decoder.0.attn.hook_pattern\n", + "decoder.0.attn.hook_z\n", + "decoder.0.hook_attn_out\n", + "decoder.0.hook_resid_mid\n", + "decoder.0.ln2.hook_scale\n", + "decoder.0.ln2.hook_normalized\n", + "decoder.0.cross_attn.hook_q\n", + "decoder.0.cross_attn.hook_k\n", + "decoder.0.cross_attn.hook_v\n", + "decoder.0.cross_attn.hook_attn_scores\n", + "decoder.0.cross_attn.hook_pattern\n", + "decoder.0.cross_attn.hook_z\n", + "decoder.0.hook_cross_attn_out\n", + "decoder.0.hook_resid_mid_cross\n", + "decoder.0.ln3.hook_scale\n", + "decoder.0.ln3.hook_normalized\n", + "decoder.0.mlp.hook_pre\n", + "decoder.0.mlp.hook_post\n", + "decoder.0.hook_mlp_out\n", + "decoder.0.hook_resid_post\n", + "decoder.1.hook_resid_pre\n", + "decoder.1.ln1.hook_scale\n", + "decoder.1.ln1.hook_normalized\n", + "decoder.1.attn.hook_q\n", + "decoder.1.attn.hook_k\n", + "decoder.1.attn.hook_v\n", + "decoder.1.attn.hook_attn_scores\n", + "decoder.1.attn.hook_pattern\n", + "decoder.1.attn.hook_z\n", + "decoder.1.hook_attn_out\n", + "decoder.1.hook_resid_mid\n", + "decoder.1.ln2.hook_scale\n", + "decoder.1.ln2.hook_normalized\n", + "decoder.1.cross_attn.hook_q\n", + "decoder.1.cross_attn.hook_k\n", + "decoder.1.cross_attn.hook_v\n", + "decoder.1.cross_attn.hook_attn_scores\n", + "decoder.1.cross_attn.hook_pattern\n", + "decoder.1.cross_attn.hook_z\n", + "decoder.1.hook_cross_attn_out\n", + "decoder.1.hook_resid_mid_cross\n", + "decoder.1.ln3.hook_scale\n", + "decoder.1.ln3.hook_normalized\n", + "decoder.1.mlp.hook_pre\n", + "decoder.1.mlp.hook_post\n", + "decoder.1.hook_mlp_out\n", + "decoder.1.hook_resid_post\n", + "decoder.2.hook_resid_pre\n", + "decoder.2.ln1.hook_scale\n", + "decoder.2.ln1.hook_normalized\n", + "decoder.2.attn.hook_q\n", + "decoder.2.attn.hook_k\n", + "decoder.2.attn.hook_v\n", + "decoder.2.attn.hook_attn_scores\n", + "decoder.2.attn.hook_pattern\n", + "decoder.2.attn.hook_z\n", + "decoder.2.hook_attn_out\n", + "decoder.2.hook_resid_mid\n", + "decoder.2.ln2.hook_scale\n", + "decoder.2.ln2.hook_normalized\n", + "decoder.2.cross_attn.hook_q\n", + "decoder.2.cross_attn.hook_k\n", + "decoder.2.cross_attn.hook_v\n", + "decoder.2.cross_attn.hook_attn_scores\n", + "decoder.2.cross_attn.hook_pattern\n", + "decoder.2.cross_attn.hook_z\n", + "decoder.2.hook_cross_attn_out\n", + "decoder.2.hook_resid_mid_cross\n", + "decoder.2.ln3.hook_scale\n", + "decoder.2.ln3.hook_normalized\n", + "decoder.2.mlp.hook_pre\n", + "decoder.2.mlp.hook_post\n", + "decoder.2.hook_mlp_out\n", + "decoder.2.hook_resid_post\n", + "decoder.3.hook_resid_pre\n", + 
"decoder.3.ln1.hook_scale\n", + "decoder.3.ln1.hook_normalized\n", + "decoder.3.attn.hook_q\n", + "decoder.3.attn.hook_k\n", + "decoder.3.attn.hook_v\n", + "decoder.3.attn.hook_attn_scores\n", + "decoder.3.attn.hook_pattern\n", + "decoder.3.attn.hook_z\n", + "decoder.3.hook_attn_out\n", + "decoder.3.hook_resid_mid\n", + "decoder.3.ln2.hook_scale\n", + "decoder.3.ln2.hook_normalized\n", + "decoder.3.cross_attn.hook_q\n", + "decoder.3.cross_attn.hook_k\n", + "decoder.3.cross_attn.hook_v\n", + "decoder.3.cross_attn.hook_attn_scores\n", + "decoder.3.cross_attn.hook_pattern\n", + "decoder.3.cross_attn.hook_z\n", + "decoder.3.hook_cross_attn_out\n", + "decoder.3.hook_resid_mid_cross\n", + "decoder.3.ln3.hook_scale\n", + "decoder.3.ln3.hook_normalized\n", + "decoder.3.mlp.hook_pre\n", + "decoder.3.mlp.hook_post\n", + "decoder.3.hook_mlp_out\n", + "decoder.3.hook_resid_post\n", + "decoder.4.hook_resid_pre\n", + "decoder.4.ln1.hook_scale\n", + "decoder.4.ln1.hook_normalized\n", + "decoder.4.attn.hook_q\n", + "decoder.4.attn.hook_k\n", + "decoder.4.attn.hook_v\n", + "decoder.4.attn.hook_attn_scores\n", + "decoder.4.attn.hook_pattern\n", + "decoder.4.attn.hook_z\n", + "decoder.4.hook_attn_out\n", + "decoder.4.hook_resid_mid\n", + "decoder.4.ln2.hook_scale\n", + "decoder.4.ln2.hook_normalized\n", + "decoder.4.cross_attn.hook_q\n", + "decoder.4.cross_attn.hook_k\n", + "decoder.4.cross_attn.hook_v\n", + "decoder.4.cross_attn.hook_attn_scores\n", + "decoder.4.cross_attn.hook_pattern\n", + "decoder.4.cross_attn.hook_z\n", + "decoder.4.hook_cross_attn_out\n", + "decoder.4.hook_resid_mid_cross\n", + "decoder.4.ln3.hook_scale\n", + "decoder.4.ln3.hook_normalized\n", + "decoder.4.mlp.hook_pre\n", + "decoder.4.mlp.hook_post\n", + "decoder.4.hook_mlp_out\n", + "decoder.4.hook_resid_post\n", + "decoder.5.hook_resid_pre\n", + "decoder.5.ln1.hook_scale\n", + "decoder.5.ln1.hook_normalized\n", + "decoder.5.attn.hook_q\n", + "decoder.5.attn.hook_k\n", + "decoder.5.attn.hook_v\n", + "decoder.5.attn.hook_attn_scores\n", + "decoder.5.attn.hook_pattern\n", + "decoder.5.attn.hook_z\n", + "decoder.5.hook_attn_out\n", + "decoder.5.hook_resid_mid\n", + "decoder.5.ln2.hook_scale\n", + "decoder.5.ln2.hook_normalized\n", + "decoder.5.cross_attn.hook_q\n", + "decoder.5.cross_attn.hook_k\n", + "decoder.5.cross_attn.hook_v\n", + "decoder.5.cross_attn.hook_attn_scores\n", + "decoder.5.cross_attn.hook_pattern\n", + "decoder.5.cross_attn.hook_z\n", + "decoder.5.hook_cross_attn_out\n", + "decoder.5.hook_resid_mid_cross\n", + "decoder.5.ln3.hook_scale\n", + "decoder.5.ln3.hook_normalized\n", + "decoder.5.mlp.hook_pre\n", + "decoder.5.mlp.hook_post\n", + "decoder.5.hook_mlp_out\n", + "decoder.5.hook_resid_post\n", + "decoder_final_ln.hook_scale\n", + "decoder_final_ln.hook_normalized\n" + ] + } + ], + "source": [ + "# the usual way of indexing cache via cache[\"pattetn\",0,\"attn\"] does not work\n", + "# besause it uses cache[\"block.0....] 
indexing\n", + "# t5 is implementes as separate stack of blocks for encoder and decoder\n", + "# so indexing is cache[\"encoder.0..\"], cache[\"decoder.0..\"] \n", + "# lets see what is in cache and choose the right key for encoder attention pattern on layer 0\n", + "print(\"\\n\".join(cache.keys()))" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "encoder_attn_pattern = cache[\"encoder.0.attn.hook_pattern\"]\n", + "input_str_tokens = [w.lstrip(\"▁\") for w in tokenizer.convert_ids_to_tokens(input_ids[0])]" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "cv.attention.attention_patterns(tokens=input_str_tokens, attention=encoder_attn_pattern)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### visualise decoder pattern" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['', '▁Bonjour', ',', '▁comment', '▁', 'êtes', '-', 'vous', '?', '']" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "decoder_str_tokens = tokenizer.convert_ids_to_tokens(decoder_input_ids[0])\n", + "decoder_str_tokens" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "decoder_attn_pattern = cache[\"decoder.0.attn.hook_pattern\"]\n", + "cv.attention.attention_patterns(tokens=decoder_str_tokens, attention=decoder_attn_pattern)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## topk tokens visualisation" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# list of samples of shape (n_layers, n_tokens, n_neurons) for each sample\n", + "# i take the activations after the mlp layer\n", + "# you can also pass the activations after the attention layer (hook_attn_out),\n", + "# after the cross attention layer (hook_cross_attn_out) or after the mlp layer (hook_mlp_out)\n", + "activations = [\n", + " torch.stack([cache[f\"decoder.{layer}.hook_mlp_out\"] for layer in range(model.cfg.n_layers)]).cpu().numpy()\n", + " ]\n", + "\n", + "# list of samples of shape (n_tokens)\n", + "tokens = [decoder_str_tokens]\n", + "\n", + "# if we have an arbitrary selection of layers, when change the layer labels, now just pass the layer index\n", + "layer_labels = [i for i in range(model.cfg.n_layers)]\n", + "\n", + "\n", + "cv.topk_tokens.topk_tokens(\n", + " tokens=tokens,\n", + " activations=activations, \n", + " max_k=10, \n", + " first_dimension_name=\"Layer\", \n", + " first_dimension_labels=layer_labels,\n", + " third_dimension_name=\"Neuron\",\n", + ")\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/demos/Tracr_to_Transformer_Lens_Demo.ipynb b/demos/Tracr_to_Transformer_Lens_Demo.ipynb index b55e57b..bb1bb54 100644 --- a/demos/Tracr_to_Transformer_Lens_Demo.ipynb +++ b/demos/Tracr_to_Transformer_Lens_Demo.ipynb @@ -65,7 +65,7 @@ " print(\"Running as a Colab notebook\")\n", " %pip install transformer_lens\n", " # Fork of Tracr that's backward compatible with Python 3.8\n", - " %pip install git+https://github.com/TransformerLensOrg/Tracr\n", + " %pip install git+https://github.com/neelnanda-io/Tracr\n", " \n", "except:\n", " IN_COLAB = False\n", diff --git a/demos/test.ipynb b/demos/test.ipynb deleted file mode 100644 index a3340b4..0000000 --- a/demos/test.ipynb +++ /dev/null @@ -1,78 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "ename": "OSError", - "evalue": "You are trying to access a gated repo.\nMake sure to have access to it at https://huggingface.co/meta-llama/Meta-Llama-3-8B.\n401 Client Error. (Request ID: Root=1-662aa17c-23adf8063ca56f12201bef1d;fee43197-2550-4397-a2e4-114eedae301c)\n\nCannot access gated repo for url https://huggingface.co/meta-llama/Meta-Llama-3-8B/resolve/main/config.json.\nAccess to model meta-llama/Meta-Llama-3-8B is restricted. 
You must be authenticated to access it.", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mHTTPError\u001b[0m Traceback (most recent call last)", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py:304\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 303\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 304\u001b[0m \u001b[43mresponse\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mraise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 305\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m HTTPError \u001b[38;5;28;01mas\u001b[39;00m e:\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/requests/models.py:1021\u001b[0m, in \u001b[0;36mResponse.raise_for_status\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1020\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m http_error_msg:\n\u001b[0;32m-> 1021\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m HTTPError(http_error_msg, response\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m)\n", - "\u001b[0;31mHTTPError\u001b[0m: 401 Client Error: Unauthorized for url: https://huggingface.co/meta-llama/Meta-Llama-3-8B/resolve/main/config.json", - "\nThe above exception was the direct cause of the following exception:\n", - "\u001b[0;31mGatedRepoError\u001b[0m Traceback (most recent call last)", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/transformers/utils/hub.py:398\u001b[0m, in \u001b[0;36mcached_file\u001b[0;34m(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, subfolder, repo_type, user_agent, _raise_exceptions_for_gated_repo, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash, **deprecated_kwargs)\u001b[0m\n\u001b[1;32m 396\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 397\u001b[0m \u001b[38;5;66;03m# Load from URL or cache if already cached\u001b[39;00m\n\u001b[0;32m--> 398\u001b[0m resolved_file \u001b[38;5;241m=\u001b[39m \u001b[43mhf_hub_download\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 399\u001b[0m \u001b[43m \u001b[49m\u001b[43mpath_or_repo_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 400\u001b[0m \u001b[43m \u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 401\u001b[0m \u001b[43m \u001b[49m\u001b[43msubfolder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43msubfolder\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m==\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43msubfolder\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 402\u001b[0m \u001b[43m \u001b[49m\u001b[43mrepo_type\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrepo_type\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 403\u001b[0m \u001b[43m \u001b[49m\u001b[43mrevision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrevision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 404\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mcache_dir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcache_dir\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 405\u001b[0m \u001b[43m \u001b[49m\u001b[43muser_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muser_agent\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 406\u001b[0m \u001b[43m \u001b[49m\u001b[43mforce_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 407\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 408\u001b[0m \u001b[43m \u001b[49m\u001b[43mresume_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 409\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 410\u001b[0m \u001b[43m \u001b[49m\u001b[43mlocal_files_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlocal_files_only\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 411\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 412\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m GatedRepoError \u001b[38;5;28;01mas\u001b[39;00m e:\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py:119\u001b[0m, in \u001b[0;36mvalidate_hf_hub_args.._inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 117\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m smoothly_deprecate_use_auth_token(fn_name\u001b[38;5;241m=\u001b[39mfn\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, has_token\u001b[38;5;241m=\u001b[39mhas_token, kwargs\u001b[38;5;241m=\u001b[39mkwargs)\n\u001b[0;32m--> 119\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:1403\u001b[0m, in \u001b[0;36mhf_hub_download\u001b[0;34m(repo_id, filename, subfolder, repo_type, revision, library_name, library_version, cache_dir, local_dir, local_dir_use_symlinks, user_agent, force_download, force_filename, proxies, etag_timeout, resume_download, token, local_files_only, headers, legacy_cache_layout, endpoint)\u001b[0m\n\u001b[1;32m 1401\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(head_call_error, RepositoryNotFoundError) \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(head_call_error, GatedRepoError):\n\u001b[1;32m 1402\u001b[0m \u001b[38;5;66;03m# Repo not found or gated => let's raise the actual error\u001b[39;00m\n\u001b[0;32m-> 1403\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m head_call_error\n\u001b[1;32m 1404\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1405\u001b[0m \u001b[38;5;66;03m# Otherwise: most likely a connection issue or Hub downtime => let's warn the user\u001b[39;00m\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:1261\u001b[0m, in \u001b[0;36mhf_hub_download\u001b[0;34m(repo_id, filename, subfolder, repo_type, revision, library_name, library_version, cache_dir, local_dir, 
local_dir_use_symlinks, user_agent, force_download, force_filename, proxies, etag_timeout, resume_download, token, local_files_only, headers, legacy_cache_layout, endpoint)\u001b[0m\n\u001b[1;32m 1260\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1261\u001b[0m metadata \u001b[38;5;241m=\u001b[39m \u001b[43mget_hf_file_metadata\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1262\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1263\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1264\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1265\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43metag_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1266\u001b[0m \u001b[43m \u001b[49m\u001b[43mlibrary_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlibrary_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1267\u001b[0m \u001b[43m \u001b[49m\u001b[43mlibrary_version\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlibrary_version\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1268\u001b[0m \u001b[43m \u001b[49m\u001b[43muser_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muser_agent\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1269\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1270\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m EntryNotFoundError \u001b[38;5;28;01mas\u001b[39;00m http_error:\n\u001b[1;32m 1271\u001b[0m \u001b[38;5;66;03m# Cache the non-existence of the file and raise\u001b[39;00m\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py:119\u001b[0m, in \u001b[0;36mvalidate_hf_hub_args.._inner_fn\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 117\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m smoothly_deprecate_use_auth_token(fn_name\u001b[38;5;241m=\u001b[39mfn\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m, has_token\u001b[38;5;241m=\u001b[39mhas_token, kwargs\u001b[38;5;241m=\u001b[39mkwargs)\n\u001b[0;32m--> 119\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:1674\u001b[0m, in \u001b[0;36mget_hf_file_metadata\u001b[0;34m(url, token, proxies, timeout, library_name, library_version, user_agent, headers)\u001b[0m\n\u001b[1;32m 1673\u001b[0m \u001b[38;5;66;03m# Retrieve metadata\u001b[39;00m\n\u001b[0;32m-> 1674\u001b[0m r \u001b[38;5;241m=\u001b[39m \u001b[43m_request_wrapper\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1675\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mHEAD\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1676\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1677\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mheaders\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mheaders\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1678\u001b[0m \u001b[43m \u001b[49m\u001b[43mallow_redirects\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 1679\u001b[0m \u001b[43m \u001b[49m\u001b[43mfollow_relative_redirects\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 1680\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1681\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1682\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1683\u001b[0m hf_raise_for_status(r)\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:369\u001b[0m, in \u001b[0;36m_request_wrapper\u001b[0;34m(method, url, follow_relative_redirects, **params)\u001b[0m\n\u001b[1;32m 368\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m follow_relative_redirects:\n\u001b[0;32m--> 369\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43m_request_wrapper\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 370\u001b[0m \u001b[43m \u001b[49m\u001b[43mmethod\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmethod\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 371\u001b[0m \u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 372\u001b[0m \u001b[43m \u001b[49m\u001b[43mfollow_relative_redirects\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 373\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mparams\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 374\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 376\u001b[0m \u001b[38;5;66;03m# If redirection, we redirect only relative paths.\u001b[39;00m\n\u001b[1;32m 377\u001b[0m \u001b[38;5;66;03m# This is useful in case of a renamed repository.\u001b[39;00m\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/huggingface_hub/file_download.py:393\u001b[0m, in \u001b[0;36m_request_wrapper\u001b[0;34m(method, url, follow_relative_redirects, **params)\u001b[0m\n\u001b[1;32m 392\u001b[0m response \u001b[38;5;241m=\u001b[39m get_session()\u001b[38;5;241m.\u001b[39mrequest(method\u001b[38;5;241m=\u001b[39mmethod, url\u001b[38;5;241m=\u001b[39murl, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mparams)\n\u001b[0;32m--> 393\u001b[0m \u001b[43mhf_raise_for_status\u001b[49m\u001b[43m(\u001b[49m\u001b[43mresponse\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 394\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py:321\u001b[0m, in \u001b[0;36mhf_raise_for_status\u001b[0;34m(response, endpoint_name)\u001b[0m\n\u001b[1;32m 318\u001b[0m message \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 319\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39mstatus_code\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m Client 
Error.\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m+\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot access gated repo for url \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresponse\u001b[38;5;241m.\u001b[39murl\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 320\u001b[0m )\n\u001b[0;32m--> 321\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m GatedRepoError(message, response) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[1;32m 323\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m error_message \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAccess to this resource is disabled.\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n", - "\u001b[0;31mGatedRepoError\u001b[0m: 401 Client Error. (Request ID: Root=1-662aa17c-23adf8063ca56f12201bef1d;fee43197-2550-4397-a2e4-114eedae301c)\n\nCannot access gated repo for url https://huggingface.co/meta-llama/Meta-Llama-3-8B/resolve/main/config.json.\nAccess to model meta-llama/Meta-Llama-3-8B is restricted. You must be authenticated to access it.", - "\nThe above exception was the direct cause of the following exception:\n", - "\u001b[0;31mOSError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[1], line 5\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtransformer_lens\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m HookedTransformer\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtorch\u001b[39;00m\n\u001b[0;32m----> 5\u001b[0m hf_model \u001b[38;5;241m=\u001b[39m \u001b[43mAutoModelForCausalLM\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_pretrained\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 6\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmeta-llama/Meta-Llama-3-8B\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice_map\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmps\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtorch_dtype\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfloat16\u001b[49m\n\u001b[1;32m 7\u001b[0m \u001b[43m)\u001b[49m\n\u001b[1;32m 8\u001b[0m HookedTransformer\u001b[38;5;241m.\u001b[39mfrom_pretrained(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmeta-llama/Meta-Llama-3-8B\u001b[39m\u001b[38;5;124m\"\u001b[39m, hf_model\u001b[38;5;241m=\u001b[39mhf_model)\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:523\u001b[0m, in \u001b[0;36m_BaseAutoModelClass.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[1;32m 520\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquantization_config\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 521\u001b[0m _ \u001b[38;5;241m=\u001b[39m 
kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mquantization_config\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 523\u001b[0m config, kwargs \u001b[38;5;241m=\u001b[39m \u001b[43mAutoConfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_pretrained\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 524\u001b[0m \u001b[43m \u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 525\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_unused_kwargs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 526\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrust_remote_code\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtrust_remote_code\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 527\u001b[0m \u001b[43m \u001b[49m\u001b[43mcode_revision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcode_revision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 528\u001b[0m \u001b[43m \u001b[49m\u001b[43m_commit_hash\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcommit_hash\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 529\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mhub_kwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 530\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 531\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 533\u001b[0m \u001b[38;5;66;03m# if torch_dtype=auto was passed here, ensure to pass it on\u001b[39;00m\n\u001b[1;32m 534\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m kwargs_orig\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtorch_dtype\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m) \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py:1138\u001b[0m, in \u001b[0;36mAutoConfig.from_pretrained\u001b[0;34m(cls, pretrained_model_name_or_path, **kwargs)\u001b[0m\n\u001b[1;32m 1135\u001b[0m trust_remote_code \u001b[38;5;241m=\u001b[39m kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtrust_remote_code\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[1;32m 1136\u001b[0m code_revision \u001b[38;5;241m=\u001b[39m kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcode_revision\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mNone\u001b[39;00m)\n\u001b[0;32m-> 1138\u001b[0m config_dict, unused_kwargs \u001b[38;5;241m=\u001b[39m \u001b[43mPretrainedConfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_config_dict\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1139\u001b[0m has_remote_code \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto_map\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m config_dict \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mAutoConfig\u001b[39m\u001b[38;5;124m\"\u001b[39m 
\u001b[38;5;129;01min\u001b[39;00m config_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mauto_map\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[1;32m 1140\u001b[0m has_local_code \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_type\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m config_dict \u001b[38;5;129;01mand\u001b[39;00m config_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_type\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;129;01min\u001b[39;00m CONFIG_MAPPING\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/transformers/configuration_utils.py:631\u001b[0m, in \u001b[0;36mPretrainedConfig.get_config_dict\u001b[0;34m(cls, pretrained_model_name_or_path, **kwargs)\u001b[0m\n\u001b[1;32m 629\u001b[0m original_kwargs \u001b[38;5;241m=\u001b[39m copy\u001b[38;5;241m.\u001b[39mdeepcopy(kwargs)\n\u001b[1;32m 630\u001b[0m \u001b[38;5;66;03m# Get config dict associated with the base config file\u001b[39;00m\n\u001b[0;32m--> 631\u001b[0m config_dict, kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_config_dict\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 632\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_commit_hash\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m config_dict:\n\u001b[1;32m 633\u001b[0m original_kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_commit_hash\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m config_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_commit_hash\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/transformers/configuration_utils.py:686\u001b[0m, in \u001b[0;36mPretrainedConfig._get_config_dict\u001b[0;34m(cls, pretrained_model_name_or_path, **kwargs)\u001b[0m\n\u001b[1;32m 682\u001b[0m configuration_file \u001b[38;5;241m=\u001b[39m kwargs\u001b[38;5;241m.\u001b[39mpop(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_configuration_file\u001b[39m\u001b[38;5;124m\"\u001b[39m, CONFIG_NAME)\n\u001b[1;32m 684\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 685\u001b[0m \u001b[38;5;66;03m# Load from local folder or from cache or download from model Hub and cache\u001b[39;00m\n\u001b[0;32m--> 686\u001b[0m resolved_config_file \u001b[38;5;241m=\u001b[39m \u001b[43mcached_file\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 687\u001b[0m \u001b[43m \u001b[49m\u001b[43mpretrained_model_name_or_path\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 688\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfiguration_file\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 689\u001b[0m \u001b[43m \u001b[49m\u001b[43mcache_dir\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcache_dir\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 690\u001b[0m \u001b[43m \u001b[49m\u001b[43mforce_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mforce_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 691\u001b[0m \u001b[43m \u001b[49m\u001b[43mproxies\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mproxies\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 692\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mresume_download\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mresume_download\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 693\u001b[0m \u001b[43m \u001b[49m\u001b[43mlocal_files_only\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlocal_files_only\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 694\u001b[0m \u001b[43m \u001b[49m\u001b[43mtoken\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtoken\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 695\u001b[0m \u001b[43m \u001b[49m\u001b[43muser_agent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43muser_agent\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 696\u001b[0m \u001b[43m \u001b[49m\u001b[43mrevision\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrevision\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 697\u001b[0m \u001b[43m \u001b[49m\u001b[43msubfolder\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msubfolder\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 698\u001b[0m \u001b[43m \u001b[49m\u001b[43m_commit_hash\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcommit_hash\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 699\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 700\u001b[0m commit_hash \u001b[38;5;241m=\u001b[39m extract_commit_hash(resolved_config_file, commit_hash)\n\u001b[1;32m 701\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mEnvironmentError\u001b[39;00m:\n\u001b[1;32m 702\u001b[0m \u001b[38;5;66;03m# Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to\u001b[39;00m\n\u001b[1;32m 703\u001b[0m \u001b[38;5;66;03m# the original exception.\u001b[39;00m\n", - "File \u001b[0;32m~/Projects/Lingwave/TransformerLens/.venv/lib/python3.11/site-packages/transformers/utils/hub.py:416\u001b[0m, in \u001b[0;36mcached_file\u001b[0;34m(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, subfolder, repo_type, user_agent, _raise_exceptions_for_gated_repo, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash, **deprecated_kwargs)\u001b[0m\n\u001b[1;32m 414\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m resolved_file \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m _raise_exceptions_for_gated_repo:\n\u001b[1;32m 415\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m resolved_file\n\u001b[0;32m--> 416\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mEnvironmentError\u001b[39;00m(\n\u001b[1;32m 417\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mYou are trying to access a gated repo.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mMake sure to have access to it at \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 418\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttps://huggingface.co/\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpath_or_repo_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mstr\u001b[39m(e)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 419\u001b[0m ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[1;32m 420\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m RepositoryNotFoundError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 421\u001b[0m 
\u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mEnvironmentError\u001b[39;00m(\n\u001b[1;32m 422\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mpath_or_repo_id\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m is not a local folder and is not a valid model identifier \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 423\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlisted on \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mhttps://huggingface.co/models\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mIf this is a private repository, make sure to pass a token \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 424\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhaving permission to this repo either by logging in with `huggingface-cli login` or by passing \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 425\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`token=`\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 426\u001b[0m ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n", - "\u001b[0;31mOSError\u001b[0m: You are trying to access a gated repo.\nMake sure to have access to it at https://huggingface.co/meta-llama/Meta-Llama-3-8B.\n401 Client Error. (Request ID: Root=1-662aa17c-23adf8063ca56f12201bef1d;fee43197-2550-4397-a2e4-114eedae301c)\n\nCannot access gated repo for url https://huggingface.co/meta-llama/Meta-Llama-3-8B/resolve/main/config.json.\nAccess to model meta-llama/Meta-Llama-3-8B is restricted. You must be authenticated to access it." - ] - } - ], - "source": [ - "from transformers import AutoModelForCausalLM\n", - "import os\n", - "from transformer_lens import HookedTransformer\n", - "import torch\n", - "\n", - "os.environ[\"HF_TOKEN\"] = \"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\"\n", - "\n", - "hf_model = AutoModelForCausalLM.from_pretrained(\n", - " \"meta-llama/Meta-Llama-3-8B\", device_map=\"mps\", torch_dtype=torch.float16\n", - ")\n", - "HookedTransformer.from_pretrained(\"meta-llama/Meta-Llama-3-8B\", hf_model=hf_model)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.8" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/tests/acceptance/test_activation_cache.py b/tests/acceptance/test_activation_cache.py index 985221a..7547f57 100644 --- a/tests/acceptance/test_activation_cache.py +++ b/tests/acceptance/test_activation_cache.py @@ -1,7 +1,9 @@ +import pytest import torch from fancy_einsum import einsum -from transformer_lens import HookedTransformer +from transformer_lens import HookedTransformer, utils +from transformer_lens.utils import Slice # Create IOI prompts ioi_prompt_formats = [ @@ -85,7 +87,37 @@ def test_logit_attrs_matches_reference_code(): ) ave_logit_diffs = logit_diffs.mean(dim=-1) - assert torch.isclose(ref_ave_logit_diffs, ave_logit_diffs, atol=1e-7).all() + assert torch.isclose(ref_ave_logit_diffs, ave_logit_diffs, atol=1.1e-7).all() + + +@torch.no_grad +def test_logit_accumulated_resid_on_last_layer_variants(): + model = load_model("solu-2l") + tokens, answer_tokens = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + accumulated_resid = 
cache.accumulated_resid(layer=-1, incl_mid=True, pos_slice=-1) + assert torch.equal( + accumulated_resid, + cache.accumulated_resid(layer=model.cfg.n_layers, incl_mid=True, pos_slice=-1), + ) + + assert torch.equal( + accumulated_resid, cache.accumulated_resid(layer=None, incl_mid=True, pos_slice=-1) + ) + + +@torch.no_grad +def test_logit_accumulated_resid_without_mid(): + model = load_model("solu-2l") + tokens, answer_tokens = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + accumulated_resid, labels = cache.accumulated_resid( + layer=-1, incl_mid=False, pos_slice=-1, return_labels=True + ) + assert len(labels) == accumulated_resid.size(0) + assert all("mid" not in label for label in labels) @torch.no_grad @@ -125,7 +157,7 @@ def test_logit_attrs_works_for_all_input_shapes(): logit_diffs = cache.logit_attrs( accumulated_residual, batch_slice=batch, - pos_slice=-1, + pos_slice=Slice(-1), tokens=answer_tokens[batch, 0], incorrect_tokens=answer_tokens[batch, 1], ) @@ -135,7 +167,7 @@ def test_logit_attrs_works_for_all_input_shapes(): batch = -1 logit_diffs = cache.logit_attrs( accumulated_residual, - batch_slice=batch, + batch_slice=Slice(batch), pos_slice=-1, tokens=int(answer_tokens[batch, 0]), incorrect_tokens=int(answer_tokens[batch, 1]), @@ -176,6 +208,29 @@ def test_logit_attrs_works_for_all_input_shapes(): ) assert torch.isclose(ref_logit_diffs[:, batch], logit_diffs).all() + # Different shape for tokens and incorrect_tokens + with pytest.raises(ValueError): + cache.logit_attrs( + accumulated_residual[:, batch, :], + has_batch_dim=False, + batch_slice=batch, + pos_slice=-1, + tokens=answer_tokens[batch, 0], + incorrect_tokens=answer_tokens[batch, 0:1], + ) + + # No incorrect tokens + ref_logit_diffs = einsum( + "... d_model, ... 
d_model -> ...", scaled_residual_stack, answer_residual_directions[:, 0] + ) + logit_diffs = cache.logit_attrs( + accumulated_residual, + pos_slice=-1, + tokens=answer_tokens[:, 0], + incorrect_tokens=None, + ) + assert torch.isclose(ref_logit_diffs, logit_diffs).all() + @torch.no_grad def test_accumulated_resid_with_apply_ln(): @@ -197,9 +252,24 @@ def test_accumulated_resid_with_apply_ln(): scaled_residual_stack = cache.accumulated_resid( layer=-1, incl_mid=True, pos_slice=-1, apply_ln=True ) + assert torch.isclose(ref_scaled_residual_stack, scaled_residual_stack, atol=1e-7).all() + # Now do the same but using None as the layer and Slice(-1) as the pos_slice + scaled_residual_stack, labels = cache.accumulated_resid( + layer=None, incl_mid=True, pos_slice=Slice(-1), apply_ln=True, return_labels=True + ) assert torch.isclose(ref_scaled_residual_stack, scaled_residual_stack, atol=1e-7).all() + expected_labels = [] + for l in range(model.cfg.n_layers + 1): + if l == model.cfg.n_layers: + expected_labels.append("final_post") + continue + expected_labels.append(f"{l}_pre") + expected_labels.append(f"{l}_mid") + + assert labels == expected_labels + @torch.no_grad def test_decompose_resid_with_apply_ln(): @@ -216,11 +286,27 @@ def test_decompose_resid_with_apply_ln(): ref_scaled_residual_stack = cache.apply_ln_to_stack(per_layer_residual, layer=-1, pos_slice=-1) # Get scaled_residual_stack using apply_ln parameter - scaled_residual_stack = cache.decompose_resid(layer=-1, pos_slice=-1, apply_ln=True) + scaled_residual_stack = cache.decompose_resid(layer=None, pos_slice=Slice(-1), apply_ln=True) assert torch.isclose(ref_scaled_residual_stack, scaled_residual_stack, atol=1e-7).all() +@torch.no_grad +def test_decompose_resid_including_attention(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + ref_attention_resids = torch.stack( + [cache["attn_out", l][:, -1] for l in range(model.cfg.n_layers)] + ) + residual_stack = cache.decompose_resid( + layer=1, pos_slice=Slice(-1), mlp_input=True, apply_ln=False, incl_embeds=False, mode="attn" + ) + + assert torch.isclose(ref_attention_resids, residual_stack, atol=1e-7).all() + + @torch.no_grad def test_stack_head_results_with_apply_ln(): # Load solu-2l @@ -233,7 +319,9 @@ def test_stack_head_results_with_apply_ln(): # Get per head resid stack and apply ln seperately (cribbed notebook code) per_head_residual = cache.stack_head_results(layer=-1, pos_slice=-1) - ref_scaled_residual_stack = cache.apply_ln_to_stack(per_head_residual, layer=-1, pos_slice=-1) + ref_scaled_residual_stack = cache.apply_ln_to_stack( + per_head_residual, layer=None, pos_slice=Slice(-1) + ) # Get scaled_residual_stack using apply_ln parameter scaled_residual_stack = cache.stack_head_results(layer=-1, pos_slice=-1, apply_ln=True) @@ -241,6 +329,36 @@ def test_stack_head_results_with_apply_ln(): assert torch.isclose(ref_scaled_residual_stack, scaled_residual_stack, atol=1e-7).all() +@torch.no_grad +def test_stack_head_results_including_remainder(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + ref_resid_post = cache["resid_post", 0][None, :, -1] + per_head_residual, labels = cache.stack_head_results( + layer=1, pos_slice=-1, incl_remainder=True, return_labels=True + ) + remainder = ref_resid_post - per_head_residual[:-1].sum(dim=0) + assert torch.isclose(remainder, per_head_residual[-1]).all() + assert labels[:-1] == 
[f"L0H{i}" for i in range(model.cfg.n_heads)] + assert labels[-1] == "remainder" + + ref_resid_post = cache["resid_post", -1][None, :, -1] + per_head_residual, labels = cache.stack_head_results( + layer=0, pos_slice=-1, incl_remainder=True, return_labels=True + ) + assert torch.isclose(ref_resid_post, per_head_residual, atol=1e-7).all() + assert len(labels) == 1 + assert labels[-1] == "remainder" + + per_head_residual, labels = cache.stack_head_results( + layer=0, pos_slice=-1, incl_remainder=False, return_labels=True + ) + assert torch.isclose(per_head_residual, torch.zeros_like(per_head_residual)).all() + assert len(labels) == 0 + + @torch.no_grad def test_stack_neuron_results_with_apply_ln(): # Load solu-2l @@ -256,6 +374,400 @@ def test_stack_neuron_results_with_apply_ln(): ref_scaled_residual_stack = cache.apply_ln_to_stack(neuron_result_stack, layer=-1, pos_slice=-1) # Get scaled_residual_stack using apply_ln parameter - scaled_residual_stack = cache.stack_neuron_results(layer=-1, pos_slice=-1, apply_ln=True) + scaled_residual_stack = cache.stack_neuron_results(layer=-1, pos_slice=Slice(-1), apply_ln=True) assert torch.isclose(ref_scaled_residual_stack, scaled_residual_stack, atol=1e-7).all() + + +@torch.no_grad +def test_stack_neuron_results_including_remainder(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + ref_resid_post = cache["resid_post", 0][None, :, -1] + neuron_result_stack, labels = cache.stack_neuron_results( + layer=1, pos_slice=Slice(-1), incl_remainder=True, return_labels=True + ) + remainder = ref_resid_post - neuron_result_stack[:-1].sum(dim=0) + assert torch.isclose(remainder, neuron_result_stack[-1]).all() + assert labels[:-1] == [f"L0N{i}" for i in range(model.cfg.d_mlp)] + assert labels[-1] == "remainder" + + ref_resid_post = cache["resid_post", -1][None, :, -1] + neuron_result_stack, labels = cache.stack_neuron_results( + layer=0, pos_slice=-1, incl_remainder=True, return_labels=True + ) + assert torch.isclose(ref_resid_post, neuron_result_stack, atol=1e-7).all() + assert len(labels) == 1 + assert labels[-1] == "remainder" + + neuron_result_stack, labels = cache.stack_neuron_results( + layer=0, pos_slice=-1, incl_remainder=False, return_labels=True + ) + assert torch.isclose(neuron_result_stack, torch.zeros_like(neuron_result_stack)).all() + assert len(labels) == 0 + + +@torch.no_grad +def test_stack_neuron_results_using_neuron_slice(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + neuron_result_stack, labels = cache.stack_neuron_results( + layer=1, pos_slice=Slice(-1), neuron_slice=Slice([0, 1, 2]), return_labels=True + ) + assert labels == [f"L0N{i}" for i in range(3)] + + +@torch.no_grad +def test_remove_batch_dim(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens[:1]) + + assert cache.has_batch_dim + shapes_before_removal = {key: cache.cache_dict[key].shape for key in cache.cache_dict} + + # Removing batch dim changes the shape of the cached tensors + cache.remove_batch_dim() + assert not cache.has_batch_dim + assert all( + shapes_before_removal[key][1:] == cache.cache_dict[key].shape + for key in shapes_before_removal + ) + + # Removing batch dim again does not change anything + cache.remove_batch_dim() + assert not cache.has_batch_dim + assert all( + shapes_before_removal[key][1:] == 
cache.cache_dict[key].shape + for key in shapes_before_removal + ) + + +@torch.no_grad +def test_remove_batch_dim_fails_if_batch_gt_1(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + assert cache.has_batch_dim + with pytest.raises(AssertionError): + cache.remove_batch_dim() + + +@torch.no_grad +def test_retrieve_activations(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + key = ("scale", 1, "ln1") + str_key = utils.get_act_name(*key) + assert torch.equal(cache[key], cache[str_key]) + + key = ("scale", -1, "ln1") + str_key = f"scale{model.cfg.n_layers - 1}ln1" + assert torch.equal(cache[key], cache[str_key]) + + key = ("k", -1, None) + str_key = f"blocks.{model.cfg.n_layers - 1}.attn.hook_k" + assert torch.equal(cache[key], cache[str_key]) + + key = "embed" + str_key = utils.get_act_name(key) + assert torch.equal(cache[key], cache[str_key]) + + key = ("embed", None) + str_key = utils.get_act_name(*key) + assert torch.equal(cache[key], cache[str_key]) + + +@torch.no_grad +def test_get_items(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + assert all( + cache_key == cache_dict_key and torch.equal(cache_val, cache_dict_val) + for (cache_key, cache_val), (cache_dict_key, cache_dict_val) in zip( + cache.items(), cache.cache_dict.items() + ) + ) + + +@torch.no_grad +def test_get_values(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + assert all( + torch.equal(cache_val, cache_dict_val) + for cache_val, cache_dict_val in zip(cache.values(), cache.cache_dict.values()) + ) + + +@torch.no_grad +def test_get_keys(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + assert all( + cache_key == cache_dict_key + for cache_key, cache_dict_key in zip(cache.keys(), cache.cache_dict.keys()) + ) + + +@torch.no_grad +def test_apply_slice_to_batch_dim(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + assert cache.has_batch_dim + batch_slice = Slice((2, 4)) + new_cache = cache.apply_slice_to_batch_dim(batch_slice) + + assert new_cache.has_batch_dim + assert all(torch.equal(cache[key][2:4], new_cache[key]) for key in cache.cache_dict) + + batch_slice = 2 + new_cache = cache.apply_slice_to_batch_dim(batch_slice) + + assert not new_cache.has_batch_dim + assert all(torch.equal(cache[key][2], new_cache[key]) for key in cache.cache_dict) + + +@torch.no_grad +def test_toggle_autodiff(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + assert not torch.is_grad_enabled() + cache.toggle_autodiff(mode=True) + assert torch.is_grad_enabled() + + +@torch.no_grad +def test_stack_activation(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + stack = cache.stack_activation("scale", -1, "ln1") + assert all( + torch.equal(cache[("scale", layer, "ln1")], stack[layer]) + for layer in range(model.cfg.n_layers) + ) + + stack = cache.stack_activation("scale", 1, "ln1") + assert all(torch.equal(cache[("scale", layer, "ln1")], stack[layer]) for layer in range(1)) + + 
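Several of the tests above pass positions and batches either as plain Python values or wrapped in transformer_lens.utils.Slice (e.g. Slice(-1), Slice((2, 4)), Slice([0, 1, 2])) and expect matching results. A hedged usage sketch, not part of the test suite, illustrating the equivalence these tests rely on; `cache` here stands for an ActivationCache built the same way as in the tests above.

# Sketch: plain indices and explicit Slice objects are interchangeable for the
# *_slice arguments of ActivationCache methods, as exercised by the tests above.
import torch
from transformer_lens.utils import Slice

resid_plain = cache.accumulated_resid(layer=-1, incl_mid=True, pos_slice=-1)
resid_slice = cache.accumulated_resid(layer=-1, incl_mid=True, pos_slice=Slice(-1))
assert torch.isclose(resid_plain, resid_slice, atol=1e-7).all()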
+@torch.no_grad +def test_get_full_resid_decomposition(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + ref_head_stack, ref_head_stack_labels = cache.stack_head_results( + layer=model.cfg.n_layers, pos_slice=Slice(-1), apply_ln=True, return_labels=True + ) + ref_mlp_stack, ref_mlp_stack_labels = cache.decompose_resid( + layer=model.cfg.n_layers, + mlp_input=False, + pos_slice=Slice(-1), + incl_embeds=False, + mode="mlp", + apply_ln=True, + return_labels=True, + ) + ref_embed = cache.apply_ln_to_stack( + cache["embed"][None, :, -1], pos_slice=Slice(-1), mlp_input=False + ) + ref_pos_embed = cache.apply_ln_to_stack( + cache["pos_embed"][None, :, -1], pos_slice=Slice(-1), mlp_input=False + ) + + ref_bias = model.accumulated_bias(model.cfg.n_layers, mlp_input=False, include_mlp_biases=False) + ref_bias = ref_bias.expand((1,) + ref_head_stack.shape[1:]) + ref_bias = cache.apply_ln_to_stack(ref_bias, pos_slice=Slice(-1), mlp_input=False) + + head_stack_len = ref_head_stack.size(0) + mlp_stack_len = ref_mlp_stack.size(0) + + residual_stack, residual_stack_labels = cache.get_full_resid_decomposition( + layer=-1, pos_slice=-1, apply_ln=True, expand_neurons=False, return_labels=True + ) + assert torch.isclose(ref_head_stack, residual_stack[:head_stack_len], atol=1e-7).all() + assert ref_head_stack_labels == residual_stack_labels[:head_stack_len] + + assert torch.isclose( + ref_mlp_stack, residual_stack[head_stack_len : head_stack_len + mlp_stack_len], atol=1e-7 + ).all() + assert ( + ref_mlp_stack_labels + == residual_stack_labels[head_stack_len : head_stack_len + mlp_stack_len] + ) + + assert torch.isclose( + ref_embed, + residual_stack[head_stack_len + mlp_stack_len : head_stack_len + mlp_stack_len + 1], + atol=1e-7, + ).all() + assert "embed" == residual_stack_labels[head_stack_len + mlp_stack_len] + + assert torch.isclose( + ref_pos_embed, + residual_stack[head_stack_len + mlp_stack_len + 1 : head_stack_len + mlp_stack_len + 2], + atol=1e-7, + ).all() + assert "pos_embed" == residual_stack_labels[head_stack_len + mlp_stack_len + 1] + + assert torch.isclose( + ref_bias, + residual_stack[head_stack_len + mlp_stack_len + 2 : head_stack_len + mlp_stack_len + 3], + atol=1e-7, + ).all() + assert "bias" == residual_stack_labels[head_stack_len + mlp_stack_len + 2] + + +@torch.no_grad +def test_get_full_resid_decomposition_with_neurons_expanded(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + ref_head_stack, ref_head_stack_labels = cache.stack_head_results( + layer=1, pos_slice=Slice(-1), apply_ln=True, return_labels=True + ) + ref_neuron_stack, ref_neuron_labels = cache.stack_neuron_results( + 1, pos_slice=Slice(-1), return_labels=True + ) + ref_neuron_stack = cache.apply_ln_to_stack(ref_neuron_stack, layer=1, pos_slice=Slice(-1)) + + head_stack_len = ref_head_stack.size(0) + neuron_stack_len = ref_neuron_stack.size(0) + + residual_stack, residual_stack_labels = cache.get_full_resid_decomposition( + layer=1, pos_slice=Slice(-1), apply_ln=True, expand_neurons=True, return_labels=True + ) + + assert torch.isclose( + ref_neuron_stack, + residual_stack[head_stack_len : head_stack_len + neuron_stack_len], + atol=1e-7, + ).all() + assert ( + ref_neuron_labels + == residual_stack_labels[head_stack_len : head_stack_len + neuron_stack_len] + ) + + +@torch.no_grad +def test_get_full_resid_decomposition_without_applying_ln(): + model = 
load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + ref_head_stack = cache.stack_head_results( + layer=1, pos_slice=Slice(-1), apply_ln=True, return_labels=False + ) + ref_neuron_stack = cache.stack_neuron_results(1, pos_slice=Slice(-1), return_labels=False) + + head_stack_len = ref_head_stack.size(0) + neuron_stack_len = ref_neuron_stack.size(0) + + residual_stack = cache.get_full_resid_decomposition( + layer=1, pos_slice=Slice(-1), apply_ln=False, expand_neurons=True, return_labels=False + ) + + assert torch.isclose( + ref_neuron_stack, + residual_stack[head_stack_len : head_stack_len + neuron_stack_len], + atol=1e-7, + ).all() + + +@torch.no_grad +def test_get_full_resid_decomposition_attn_only_model(): + model = load_model("attn-only-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + ref_head_stack = cache.stack_head_results( + layer=1, pos_slice=Slice(-1), apply_ln=False, return_labels=False + ) + + head_stack_len = ref_head_stack.size(0) + + residual_stack = cache.get_full_resid_decomposition( + layer=1, pos_slice=Slice(-1), apply_ln=False, expand_neurons=False, return_labels=False + ) + + assert torch.isclose(ref_head_stack, residual_stack[:head_stack_len], atol=1e-7).all() + + +@torch.no_grad +def test_compute_test_head_results_does_not_compute_results_twice(): + model = load_model("attn-only-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + assert "blocks.0.attn.hook_result" not in cache.cache_dict + cache.compute_head_results() + assert "blocks.0.attn.hook_result" in cache.cache_dict + + # set infinity to the first element of the head results + assert cache.cache_dict["blocks.0.attn.hook_result"][0, 0, 0, 0] != float("inf") + cache.cache_dict["blocks.0.attn.hook_result"][0, 0, 0, 0] = float("inf") + cache.compute_head_results() + + # assert the value has not changed + assert cache.cache_dict["blocks.0.attn.hook_result"][0, 0, 0, 0] == float("inf") + + +@torch.no_grad +def test_get_neuron_results(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + layer = 1 + ref_neuron_acts = ( + cache[f"blocks.{layer}.mlp.hook_post"][:, -1, :2, None] * model.blocks[layer].mlp.W_out[:2] + ) + + neuron_acts = cache.get_neuron_results( + layer, + neuron_slice=[0, 1], + pos_slice=-1, + ) + + assert torch.isclose(ref_neuron_acts, neuron_acts).all() + + +@torch.no_grad +def test_get_neuron_results_without_slice(): + model = load_model("solu-2l") + tokens, _ = get_ioi_tokens_and_answer_tokens(model) + _, cache = model.run_with_cache(tokens) + + layer = 1 + ref_neuron_acts = ( + cache[f"blocks.{layer}.mlp.hook_post"][..., None] * model.blocks[layer].mlp.W_out + ) + + neuron_acts = cache.get_neuron_results( + layer, + neuron_slice=None, + pos_slice=None, + ) + + assert torch.isclose(ref_neuron_acts, neuron_acts).all() diff --git a/tests/acceptance/test_hooked_encoder_decoder.py b/tests/acceptance/test_hooked_encoder_decoder.py new file mode 100644 index 0000000..8fc2810 --- /dev/null +++ b/tests/acceptance/test_hooked_encoder_decoder.py @@ -0,0 +1,337 @@ +import pytest +import torch +from jaxtyping import Float +from torch.testing import assert_close +from transformers import AutoTokenizer, T5ForConditionalGeneration + +from transformer_lens import HookedEncoderDecoder + +MODEL_NAME = "t5-small" + + +@pytest.fixture(scope="module") +def 
our_model(): + return HookedEncoderDecoder.from_pretrained(MODEL_NAME, device="cpu") + + +@pytest.fixture(scope="module") +def huggingface_model(): + return T5ForConditionalGeneration.from_pretrained(MODEL_NAME).eval() + + +@pytest.fixture(scope="module") +def tokenizer(): + return AutoTokenizer.from_pretrained(MODEL_NAME) + + +@pytest.fixture +def hello_world_tokens(tokenizer): + return tokenizer("Hello, world!", return_tensors="pt")["input_ids"] + + +@pytest.fixture +def decoder_input_ids(tokenizer): + return torch.LongTensor([[tokenizer.pad_token_id]]) + + +def test_full_model(our_model, huggingface_model, tokenizer, decoder_input_ids): + sequences = ["Hello, world!", "this is another sequence of tokens"] + + tokenized = tokenizer(sequences, return_tensors="pt", padding=True) + decoder_ids = torch.stack([decoder_input_ids[0]] * len(sequences), dim=0) + input_ids = tokenized["input_ids"] + + attention_mask = tokenized["attention_mask"] + + huggingface_model_out = huggingface_model( + input_ids=input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_ids, + ).logits + our_model_out = our_model( + input_ids, + decoder_input=decoder_ids, + one_zero_attention_mask=attention_mask, + ) + assert_close(huggingface_model_out, our_model_out, rtol=1.3e-6, atol=4e-5) + + +def test_encoder(our_model, huggingface_model, hello_world_tokens): + our_embeds = our_model.embed(hello_world_tokens) + pos_bias = our_model.encoder[0].attn.compute_relative_attention_bias( + hello_world_tokens.shape[1], hello_world_tokens.shape[1] + ) + + for our_layer in our_model.encoder: + our_embeds = our_layer(resid_pre=our_embeds, position_bias=pos_bias) + + our_encoder_out = our_model.encoder_final_ln(our_embeds) + + huggingface_encoder_out = huggingface_model.encoder(hello_world_tokens).last_hidden_state + + assert_close(our_encoder_out, huggingface_encoder_out, rtol=1.3e-6, atol=4e-5) + + +def test_decoder(our_model, huggingface_model, hello_world_tokens, decoder_input_ids): + encoder_hidden = huggingface_model.encoder(hello_world_tokens)[0] + + embeds = our_model.embed(decoder_input_ids) + pos_bias = our_model.decoder[0].attn.compute_relative_attention_bias( + decoder_input_ids.shape[1], decoder_input_ids.shape[1] + ) + for layer in our_model.decoder: + embeds = layer(embeds, encoder_hidden_states=encoder_hidden, position_bias=pos_bias) + + our_decoder_out = our_model.decoder_final_ln(embeds) + hf_decoder_out = huggingface_model.decoder( + decoder_input_ids, encoder_hidden_states=encoder_hidden + )[0] + + assert_close(our_decoder_out, hf_decoder_out, rtol=1.3e-6, atol=4e-5) + + +def test_embed_one_sentence(our_model, huggingface_model, hello_world_tokens): + huggingface_embed = huggingface_model.encoder.embed_tokens + our_embed = our_model.embed + + huggingface_embed_out = huggingface_embed(hello_world_tokens)[0] + our_embed_out = our_embed(hello_world_tokens).squeeze(0) + assert_close(huggingface_embed_out, our_embed_out) + + +def test_relative_attention_bias(our_model, huggingface_model, hello_world_tokens): + # it is used only in self attention of first layer of encoder + huggingface_embed = huggingface_model.encoder.embed_tokens + huggingface_attn = huggingface_model.encoder.block[0].layer[0].SelfAttention + our_attn = our_model.encoder[0].attn + + assert huggingface_attn.has_relative_attention_bias + assert our_attn.has_relative_attention_bias + assert ( + our_attn.relative_attention_num_buckets == huggingface_attn.relative_attention_num_buckets + ) + assert ( + 
our_attn.relative_attention_max_distance == huggingface_attn.relative_attention_max_distance + ) + assert_close(our_attn.rel_pos_bias.weight, huggingface_attn.relative_attention_bias.weight) + + input_len = hello_world_tokens.shape[1] + our_bias = our_attn.compute_relative_attention_bias(input_len, input_len) + hf_bias = huggingface_attn.compute_bias(input_len, input_len) + assert_close(our_bias, hf_bias, rtol=1e-5, atol=1e-5) + + embed_out = huggingface_embed(hello_world_tokens) + + huggingface_attn_out = huggingface_attn(embed_out)[0] + our_attn_out = our_attn(embed_out, embed_out, embed_out, position_bias=our_bias) + + assert_close(our_attn_out, huggingface_attn_out, rtol=7.4e-4, atol=1e-5) + + +def test_relative_attention_layer(our_model, huggingface_model, hello_world_tokens): + # it is used only in self attention of first layer of encoder + hf_block = huggingface_model.encoder.block[0].layer[0] + our_block = our_model.encoder[0] + resid = huggingface_model.encoder.embed_tokens(hello_world_tokens) + + input_len = hello_world_tokens.shape[1] + our_bias = our_block.attn.compute_relative_attention_bias(input_len, input_len) + resid_norm = our_block.ln1(resid) + our_out = resid + our_block.attn(resid_norm, resid_norm, resid_norm, position_bias=our_bias) + + hf_out = hf_block(resid)[0] + assert_close(our_out, hf_out, rtol=1.3e-6, atol=4e-5) + + +def test_attention(our_model, huggingface_model, hello_world_tokens): + huggingface_embed = huggingface_model.encoder.embed_tokens + huggingface_attn = huggingface_model.encoder.block[1].layer[0].SelfAttention + + embed_out = huggingface_embed(hello_world_tokens) + our_attn = our_model.encoder[1].attn + + our_attn_out = our_attn(embed_out, embed_out, embed_out) + huggingface_attn_out = huggingface_attn(embed_out)[0] + + assert_close(our_attn_out, huggingface_attn_out, rtol=5e-4, atol=1e-5) + + +def test_decoder_attention(our_model, huggingface_model, hello_world_tokens): + huggingface_embed = huggingface_model.decoder.embed_tokens + huggingface_attn = huggingface_model.decoder.block[1].layer[0].SelfAttention + + embed_out = huggingface_embed(hello_world_tokens) + our_attn = our_model.decoder[1].attn + + our_attn_out = our_attn(embed_out, embed_out, embed_out) + huggingface_attn_out = huggingface_attn(embed_out)[0] + assert_close(our_attn_out, huggingface_attn_out, rtol=3e-4, atol=1e-5) + + +def test_attention_layer(our_model, huggingface_model, hello_world_tokens): + huggingface_embed = huggingface_model.encoder.embed_tokens + huggingface_attn = huggingface_model.encoder.block[1].layer[0] + + embed_out = huggingface_embed(hello_world_tokens) + our_attn = our_model.encoder[1].attn + norm_embed = our_model.encoder[1].ln1(embed_out) + our_attn_out = our_attn(norm_embed, norm_embed, norm_embed) + embed_out + + huggingface_attn_out = huggingface_attn(embed_out)[0] + assert_close(our_attn_out, huggingface_attn_out, rtol=2e-4, atol=1e-5) + + +def test_decoder_attention_layer(our_model, huggingface_model, hello_world_tokens): + huggingface_embed = huggingface_model.decoder.embed_tokens + huggingface_attn = huggingface_model.decoder.block[1].layer[0] + + embed_out = huggingface_embed(hello_world_tokens) + our_attn = our_model.decoder[1].attn + norm_embed = our_model.decoder[1].ln1(embed_out) + our_attn_out = our_attn(norm_embed, norm_embed, norm_embed) + embed_out + + huggingface_attn_out = huggingface_attn(embed_out)[0] + assert_close(our_attn_out, huggingface_attn_out, rtol=3e-4, atol=4e-5) + + +def test_cross_attention(our_model, huggingface_model, 
hello_world_tokens, decoder_input_ids): + encoder_hidden = huggingface_model.encoder(hello_world_tokens).last_hidden_state + decoder_hidden = huggingface_model.decoder.embed_tokens(decoder_input_ids) + + huggingface_cross_attn = huggingface_model.decoder.block[0].layer[1].EncDecAttention + our_cross_attn = our_model.decoder[0].cross_attn + + our_cross_attn_out = our_cross_attn(decoder_hidden, encoder_hidden, encoder_hidden) + huggingface_cross_attn_out = huggingface_cross_attn( + decoder_hidden, key_value_states=encoder_hidden + )[0] + assert_close(our_cross_attn_out, huggingface_cross_attn_out, rtol=2e-4, atol=1e-5) + + +def test_cross_attention_layer(our_model, huggingface_model, hello_world_tokens, decoder_input_ids): + encoder_hidden = huggingface_model.encoder(hello_world_tokens).last_hidden_state + decoder_hidden = huggingface_model.decoder.embed_tokens(decoder_input_ids) + + hf_layer = huggingface_model.decoder.block[0].layer[1] + our_layer = our_model.decoder[0] + # assert ln weights are the same + assert_close(hf_layer.layer_norm.weight, our_layer.ln2.w) + + our_cross_attn_out = ( + our_layer.cross_attn(our_layer.ln2(decoder_hidden), encoder_hidden, encoder_hidden) + + decoder_hidden + ) + huggingface_cross_attn_out = hf_layer(decoder_hidden, key_value_states=encoder_hidden)[0] + assert_close(our_cross_attn_out, huggingface_cross_attn_out, rtol=2e-4, atol=1e-5) + + +def test_encoder_block(our_model, huggingface_model, hello_world_tokens): + huggingface_embed = huggingface_model.encoder.embed_tokens + huggingface_block = huggingface_model.encoder.block[1] + our_block = our_model.encoder[1] + + embed_out = huggingface_embed(hello_world_tokens) + + hf_out = huggingface_block(embed_out)[0] + our_out = our_block(embed_out) + + assert_close(our_out, hf_out, rtol=2e-4, atol=2e-5) + + +def test_decoder_block(our_model, huggingface_model, hello_world_tokens, decoder_input_ids): + huggingface_embed = huggingface_model.decoder.embed_tokens + huggingface_block = huggingface_model.decoder.block[1] + our_block = our_model.decoder[1] + + encoder_hidden = huggingface_model.encoder(hello_world_tokens)[0] + decoder_hidden = huggingface_model.decoder.block[0](huggingface_embed(decoder_input_ids))[0] + + our_out = our_block(decoder_hidden, encoder_hidden_states=encoder_hidden) + hf_out = huggingface_block(decoder_hidden, encoder_hidden_states=encoder_hidden)[0] + + assert_close(hf_out, our_out, rtol=2e-4, atol=2e-5) + + +def test_layernorm(our_model, huggingface_model, hello_world_tokens): + huggingface_embed = huggingface_model.encoder.embed_tokens + huggingface_layernorm = huggingface_model.encoder.block[0].layer[0].layer_norm + our_layernorm = our_model.encoder[0].ln1 + + embed_out = huggingface_embed(hello_world_tokens) + + our_layernorm_out = our_layernorm(embed_out) + huggingface_layernorm_out = huggingface_layernorm(embed_out) + assert_close(our_layernorm_out, huggingface_layernorm_out) + + +def test_unembed(our_model, huggingface_model, hello_world_tokens): + huggingface_model_hidden = huggingface_model.decoder(hello_world_tokens).last_hidden_state + + our_model_logits = our_model.unembed(huggingface_model_hidden) + huggingface_model_logits = huggingface_model.lm_head(huggingface_model_hidden) + + assert_close(our_model_logits, huggingface_model_logits, rtol=1.3e-3, atol=1e-5) + + +def test_run_with_cache(our_model, hello_world_tokens, decoder_input_ids): + logits, cache = our_model.run_with_cache(hello_world_tokens, decoder_input=decoder_input_ids) + + # check that an arbitrary subset of the 
keys exist and have the right shape + seq_len = 5 + generated_len = 1 + assert "hook_embed" in cache + assert cache["hook_embed"].shape == (1, seq_len, 512) + assert "encoder.1.attn.hook_v" in cache + assert cache["encoder.1.attn.hook_v"].shape == (1, seq_len, 8, 64) + assert "encoder.3.attn.hook_attn_scores" in cache + assert cache["encoder.3.attn.hook_attn_scores"].shape == (1, 8, seq_len, seq_len) + assert "decoder.0.cross_attn.hook_k" in cache + assert cache["decoder.0.cross_attn.hook_attn_scores"].shape == ( + 1, + 8, + generated_len, + seq_len, + ) + assert "decoder.3.hook_resid_post" in cache + assert cache["decoder.3.hook_resid_post"].shape == (1, generated_len, 512) + + +def test_from_pretrained_revision(): + """ + Check that the from_pretrained parameter `revision` (= git version) works + """ + + _ = HookedEncoderDecoder.from_pretrained(MODEL_NAME, revision="main") + + try: + _ = HookedEncoderDecoder.from_pretrained(MODEL_NAME, revision="inexistent_branch_name") + except: + pass + else: + raise AssertionError("Should have raised an error") + + +def test_predictions(our_model, huggingface_model, tokenizer, decoder_input_ids): + input_ids = tokenizer("My name is Wolfgang and I live in Berlin", return_tensors="pt")[ + "input_ids" + ] + + def get_predictions(logits: Float[torch.Tensor, "batch pos d_vocab"]): + predicted_tokens = logits[0].argmax(dim=-1) + return tokenizer.batch_decode(predicted_tokens) + + our_model_logits = our_model(input_ids, decoder_input=decoder_input_ids) + our_prediction = get_predictions(our_model_logits) + + huggingface_model_logits = huggingface_model( + input_ids, decoder_input_ids=decoder_input_ids + ).logits + huggingface_prediction = get_predictions(huggingface_model_logits) + + assert our_prediction == huggingface_prediction + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="Requires a CUDA device") +def test_cuda(hello_world_tokens, decoder_input_ids): + model = HookedEncoderDecoder.from_pretrained(MODEL_NAME) + model(hello_world_tokens, decoder_input=decoder_input_ids.cuda()) diff --git a/tests/integration/test_attention_mask.py b/tests/integration/test_attention_mask.py index 6b0951f..fc4fe41 100644 --- a/tests/integration/test_attention_mask.py +++ b/tests/integration/test_attention_mask.py @@ -45,3 +45,35 @@ def attn_hook(attn, hook): ] model.run_with_hooks(input, fwd_hooks=fwd_hooks) + + +def test_masked_tokens(): + """Test that masking tokens works as expected.""" + MODEL = "solu-1l" + prompts = [ + "Hello world!", + "The quick brown fox jumps over the lazy dog.", + ] + model = HookedTransformer.from_pretrained(MODEL) + tokens = model.to_tokens(prompts) + + # Part 1: If the mask is all ones, the output should be the same as if there was no mask. + full_mask = torch.ones_like(tokens) + no_mask_out = model(tokens) + full_mask_out = model(tokens, attention_mask=full_mask) + assert torch.allclose(no_mask_out, full_mask_out), "Full mask should be equivalent to no mask" + + # Part 2: If the mask has a column of zeros, the output should be the same as if that token + # position was removed from the input. 
+ remove_tok_idx = 2 + edited_tokens = torch.cat([tokens[:, :remove_tok_idx], tokens[:, remove_tok_idx + 1 :]], dim=1) + edited_mask = full_mask.clone() + edited_mask[:, remove_tok_idx] = 0 + edited_no_mask_out = model(edited_tokens) + edited_mask_out = model(tokens, attention_mask=edited_mask) + edited_mask_out = torch.cat( + [edited_mask_out[:, :remove_tok_idx], edited_mask_out[:, remove_tok_idx + 1 :]], dim=1 + ) + assert torch.allclose( + edited_no_mask_out, edited_mask_out, atol=1e-4 + ), "Edited mask should be equivalent to no mask" diff --git a/tests/integration/test_cross_entropy_loss.py b/tests/integration/test_cross_entropy_loss.py new file mode 100644 index 0000000..0400700 --- /dev/null +++ b/tests/integration/test_cross_entropy_loss.py @@ -0,0 +1,32 @@ +import torch + +from transformer_lens.HookedTransformer import HookedTransformer + + +def test_cross_entropy_attention_mask(): + """Check that adding a bunch of masked tokens to the input does not change the loss.""" + MODEL = "solu-1l" + model = HookedTransformer.from_pretrained(MODEL) + + # Step 1: Get the default loss on a prompt + prompt = ["The quick brown fox jumps over the lazy dog."] + default_tokens = model.to_tokens(prompt) + default_attention_mask = torch.ones_like(default_tokens) + default_loss = model(default_tokens, return_type="loss") + ones_mask_loss = model( + default_tokens, attention_mask=default_attention_mask, return_type="loss" + ) + assert torch.allclose(default_loss, ones_mask_loss, atol=1e-6) + + # Step 2: Get the loss when we add some extra tokens to the input and set their attention mask + # to zero + extra_prompt = ["Lorem ipsum dolor sit amet, consectetur adipiscing elit."] + extra_tokens = model.to_tokens(extra_prompt) + extra_zeros_attention_mask = torch.zeros_like(extra_tokens) + + combined_tokens = torch.cat([default_tokens, extra_tokens], dim=1) + combined_attention_mask = torch.cat([default_attention_mask, extra_zeros_attention_mask], dim=1) + combined_masked_loss = model( + combined_tokens, attention_mask=combined_attention_mask, return_type="loss" + ) + assert torch.allclose(default_loss, combined_masked_loss) diff --git a/tests/integration/test_hooks.py b/tests/integration/test_hooks.py index 231a571..6a9880a 100644 --- a/tests/integration/test_hooks.py +++ b/tests/integration/test_hooks.py @@ -1,3 +1,5 @@ +from typing import Any + import pytest import torch @@ -73,6 +75,24 @@ def test_context_manager_run_with_cache(): model.remove_all_hook_fns(including_permanent=True) +def test_backward_hook_runs_successfully(): + c = Counter() + + def skip_grad(output_grad: torch.Tensor, hook: Any): + c.inc() + return (output_grad,) + + with model.hooks(bwd_hooks=[(embed, skip_grad)]): + assert len(model.hook_dict["hook_embed"].bwd_hooks) == 1 + out = model(prompt) + assert c.count == 0 + out.sum().backward() # this should run the hook + assert len(model.hook_dict["hook_embed"].bwd_hooks) == 1 + assert len(model.hook_dict["hook_embed"].bwd_hooks) == 0 + assert c.count == 1 + model.remove_all_hook_fns(including_permanent=True) + + def test_hook_context_manager_with_permanent_hook(): c = Counter() model.add_perma_hook(embed, c.inc) diff --git a/tests/integration/test_match_huggingface.py b/tests/integration/test_match_huggingface.py new file mode 100644 index 0000000..b312457 --- /dev/null +++ b/tests/integration/test_match_huggingface.py @@ -0,0 +1,45 @@ +import math + +import pytest +import torch +from transformers import AutoModelForCausalLM + +from transformer_lens import HookedTransformer + + +class 
TestMatchHuggingFace: + # fixtures + @pytest.fixture(scope="class", params=["gpt2"]) + def model_name(self, request): + return request.param + + # tests + def test_compare_huggingface_mlp_match_local_implementation(self, model_name): + tl_model = HookedTransformer.from_pretrained_no_processing(model_name, device="cpu") + hf_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cpu") + tensor_shape = (3, 5, tl_model.cfg.d_model) + test_tensor = torch.randn(tensor_shape) + + for layer_n in range(len(tl_model.blocks)): + tl_out = tl_model.blocks[layer_n].mlp(test_tensor) + hf_out = hf_model.transformer.h[layer_n].mlp(test_tensor) + + assert torch.sum(tl_out == hf_out) == math.prod(tensor_shape) + + def test_compare_huggingface_attention_match_local_implementation(self, model_name): + tl_model = HookedTransformer.from_pretrained_no_processing(model_name, device="cpu") + hf_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cpu") + batch, pos, d_model = 3, 5, tl_model.cfg.d_model + input = torch.randn(batch, pos, d_model) + + for layer_n in range(len(tl_model.blocks)): + tl_out = tl_model.blocks[layer_n].attn( + query_input=input, + key_input=input, + value_input=input, + past_kv_cache_entry=None, + attention_mask=None, + ) + hf_out, _ = hf_model.transformer.h[layer_n].attn(hidden_states=input) + + assert torch.sum(tl_out == hf_out) == math.prod(tl_out.shape) diff --git a/tests/unit/components/mlps/test_can_be_used_as_mlp.py b/tests/unit/components/mlps/test_can_be_used_as_mlp.py new file mode 100644 index 0000000..a69c4c5 --- /dev/null +++ b/tests/unit/components/mlps/test_can_be_used_as_mlp.py @@ -0,0 +1,92 @@ +from typing import Any, Dict + +import pytest +import torch + +from transformer_lens.components import LayerNorm, LayerNormPre +from transformer_lens.components.mlps.can_be_used_as_mlp import CanBeUsedAsMLP +from transformer_lens.hook_points import HookPoint +from transformer_lens.utils import solu + + +@pytest.fixture +def cfg() -> Dict[str, Any]: + return { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "d_mlp": 256, + "dtype": torch.float32, + "act_fn": "solu_ln", + "normalization_type": "LN", + "load_in_4bit": False, + } + + +def test_initialization(cfg: Dict[str, Any]): + CanBeUsedAsMLP(cfg) + + +def test_initialization_fails_without_d_mlp(cfg: Dict[str, Any]): + cfg["d_mlp"] = None + pytest.raises(ValueError) + CanBeUsedAsMLP(cfg) + + +def test_select_activation_function_selects_function(): + cfg = { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "d_mlp": 256, + "dtype": torch.float32, + "act_fn": "silu", + "normalization_type": "LN", + "load_in_4bit": False, + } + + model = CanBeUsedAsMLP(cfg) + model.select_activation_function() + assert model.act_fn is not None + + +def test_select_activation_function_with_layer_norm(): + cfg = { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "d_mlp": 256, + "dtype": torch.float32, + "act_fn": "solu_ln", + "normalization_type": "LN", + "load_in_4bit": False, + } + + model = CanBeUsedAsMLP(cfg) + model.select_activation_function() + assert model.act_fn == solu + assert isinstance(model.hook_mid, HookPoint) + assert isinstance(model.ln, LayerNorm) + + +def test_select_activation_function_with_layer_norm_pre(): + cfg = { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "d_mlp": 256, + "dtype": torch.float32, + "act_fn": "solu_ln", + "normalization_type": "LNPre", + "load_in_4bit": False, + } + + model = 
CanBeUsedAsMLP(cfg) + model.select_activation_function() + assert model.act_fn == solu + assert isinstance(model.hook_mid, HookPoint) + assert isinstance(model.ln, LayerNormPre) diff --git a/tests/unit/components/mlps/test_gated_mlp.py b/tests/unit/components/mlps/test_gated_mlp.py new file mode 100644 index 0000000..abb0b7b --- /dev/null +++ b/tests/unit/components/mlps/test_gated_mlp.py @@ -0,0 +1,41 @@ +from typing import Any, Dict + +import pytest +import torch +import torch.nn as nn + +from transformer_lens.components import GatedMLP, LayerNorm +from transformer_lens.utils import solu + + +@pytest.fixture +def cfg() -> Dict[str, Any]: + return { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "d_mlp": 256, + "dtype": torch.float32, + "act_fn": "solu_ln", + "normalization_type": "LN", + "load_in_4bit": False, + } + + +def test_initialization(cfg: Dict[str, Any]): + model = GatedMLP(cfg) + assert isinstance(model.W_in, nn.Parameter) + assert isinstance(model.W_gate, nn.Parameter) + assert isinstance(model.W_out, nn.Parameter) + assert isinstance(model.b_in, nn.Parameter) + assert isinstance(model.b_out, nn.Parameter) + assert model.act_fn == solu + assert isinstance(model.ln, LayerNorm) + + +def test_forward(cfg: Dict[str, Any]): + model = GatedMLP(cfg) + x = torch.randn(2, 10, cfg["d_model"]) + output = model(x) + assert output.shape == (2, 10, cfg["d_model"]) diff --git a/tests/unit/components/mlps/test_mlp.py b/tests/unit/components/mlps/test_mlp.py new file mode 100644 index 0000000..feb8be7 --- /dev/null +++ b/tests/unit/components/mlps/test_mlp.py @@ -0,0 +1,49 @@ +from typing import Any, Dict + +import pytest +import torch + +from transformer_lens.components import LayerNorm +from transformer_lens.components.mlps.mlp import MLP +from transformer_lens.hook_points import HookPoint + + +@pytest.fixture +def cfg() -> Dict[str, Any]: + return { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "d_mlp": 256, + "dtype": torch.float32, + "act_fn": "solu_ln", + "normalization_type": "LN", + "load_in_4bit": False, + } + + +def test_initialization(cfg: Dict[str, Any]): + MLP(cfg) + + +def test_forward_without_layer_norm(cfg: Dict[str, Any]): + cfg["act_fn"] = "solu" + + model = MLP(cfg) + + input = torch.full((1, 1, 128), 0.085) + + result = model(input) + + assert result.shape == (1, 1, 128) + + +def test_forward_with_layer_norm(cfg: Dict[str, Any]): + model = MLP(cfg) + assert isinstance(model.hook_mid, HookPoint) + assert isinstance(model.ln, LayerNorm) + + input = torch.full((1, 1, 128), 0.85) + result = model(input) + assert result.shape == (1, 1, 128) diff --git a/tests/unit/components/mlps/test_moe.py b/tests/unit/components/mlps/test_moe.py new file mode 100644 index 0000000..4c56126 --- /dev/null +++ b/tests/unit/components/mlps/test_moe.py @@ -0,0 +1,21 @@ +import torch + +from transformer_lens.components import MoE + + +def test_forward(): + cfg = { + "d_model": 32, + "d_mlp": 14336, + "d_head": 4, + "num_experts": 32, + "n_layers": 16, + "n_ctx": 2048, + "experts_per_token": 4, + "gated_mlp": True, + "act_fn": "silu", + } + moe = MoE(cfg) + + x = torch.rand((1, 4, 32)) + moe(x) diff --git a/tests/unit/components/test_attention.py b/tests/unit/components/test_attention.py new file mode 100644 index 0000000..c27ca6c --- /dev/null +++ b/tests/unit/components/test_attention.py @@ -0,0 +1,100 @@ +import pytest +import torch +import torch.nn as nn +from transformers.utils import is_bitsandbytes_available + +from transformer_lens.components 
import Attention +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + +if is_bitsandbytes_available(): + from bitsandbytes.nn.modules import Params4bit + + +def test_attention_hooked_transformer_config(): + cfg = HookedTransformerConfig( + n_layers=12, + d_model=512, + n_ctx=1024, + d_head=64, + n_heads=8, + load_in_4bit=False, + dtype=torch.float32, + act_fn="relu", + ) + attn = Attention(cfg) + assert attn.cfg == cfg + assert attn.cfg.n_layers == 12 + assert attn.cfg.d_model == 512 + assert attn.cfg.n_ctx == 1024 + assert attn.cfg.d_head == 64 + assert attn.cfg.n_heads == 8 + assert attn.cfg.load_in_4bit == False + assert attn.cfg.dtype == torch.float32 + assert attn.cfg.act_fn == "relu" + + assert isinstance(attn.W_K, nn.Parameter) + assert isinstance(attn.W_V, nn.Parameter) + assert attn.W_K.shape == (cfg.n_heads, cfg.d_model, cfg.d_head) + assert attn.W_V.shape == (cfg.n_heads, cfg.d_model, cfg.d_head) + + assert attn.b_K.shape == (cfg.n_heads, cfg.d_head) + assert attn.b_V.shape == (cfg.n_heads, cfg.d_head) + assert torch.all(attn.b_K == 0) + assert torch.all(attn.b_V == 0) + + +@pytest.mark.skipif(not is_bitsandbytes_available(), reason="bitsandbytes is not available") +def test_attention_load_in_4bit(): + cfg = HookedTransformerConfig( + n_layers=12, + d_model=512, + n_ctx=1024, + d_head=64, + n_heads=8, + load_in_4bit=True, + dtype=torch.float32, + act_fn="relu", + ) + attn = Attention(cfg) + assert attn.cfg == cfg + assert attn.cfg.n_layers == 12 + assert attn.cfg.d_model == 512 + assert attn.cfg.n_ctx == 1024 + assert attn.cfg.d_head == 64 + assert attn.cfg.n_heads == 8 + assert attn.cfg.load_in_4bit == False + assert attn.cfg.dtype == torch.float32 + assert attn.cfg.act_fn == "relu" + + assert isinstance(attn.W_K, Params4bit) + assert isinstance(attn.W_V, Params4bit) + nq = int((cfg.d_model * cfg.d_model) / 2) + assert attn.W_K.data.shape == (nq, 1) + assert attn.W_V.data.shape == (nq, 1) + + assert attn.b_K.shape == (cfg.n_heads, cfg.d_head) + assert attn.b_V.shape == (cfg.n_heads, cfg.d_head) + assert torch.all(attn.b_K == 0) + assert torch.all(attn.b_V == 0) + + +def test_attention_config_dict(): + cfg = { + "n_layers": 12, + "d_model": 512, + "n_ctx": 1024, + "d_head": 64, + "n_heads": 8, + "load_in_4bit": False, + "dtype": torch.float32, + "act_fn": "relu", + } + attn = Attention(cfg) + assert attn.cfg.n_layers == 12 + assert attn.cfg.d_model == 512 + assert attn.cfg.n_ctx == 1024 + assert attn.cfg.d_head == 64 + assert attn.cfg.n_heads == 8 + assert attn.cfg.load_in_4bit == False + assert attn.cfg.dtype == torch.float32 + assert attn.cfg.act_fn == "relu" diff --git a/tests/unit/factories/test_activation_function_factory.py b/tests/unit/factories/test_activation_function_factory.py new file mode 100644 index 0000000..41b495d --- /dev/null +++ b/tests/unit/factories/test_activation_function_factory.py @@ -0,0 +1,20 @@ +import pytest +import torch + +from transformer_lens.factories.activation_function_factory import ( + ActivationFunctionFactory, +) +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.utilities.activation_functions import SUPPORTED_ACTIVATIONS + + +@pytest.mark.parametrize("act_function", SUPPORTED_ACTIVATIONS.keys()) +def test_pick_activation_function_runs(act_function): + config = HookedTransformerConfig.unwrap( + {"n_layers": 12, "n_ctx": 1024, "d_head": 64, "d_model": 128, "act_fn": act_function} + ) + function = ActivationFunctionFactory.pick_activation_function(config) + assert 
function is not None + dummy_data = torch.zeros((1, 4, 32)) + result = function(dummy_data) + assert isinstance(result, torch.Tensor) diff --git a/tests/unit/factories/test_mlp_factory.py b/tests/unit/factories/test_mlp_factory.py new file mode 100644 index 0000000..114d637 --- /dev/null +++ b/tests/unit/factories/test_mlp_factory.py @@ -0,0 +1,74 @@ +import pytest +from transformers.utils import is_bitsandbytes_available + +from transformer_lens.components.mlps.gated_mlp import GatedMLP +from transformer_lens.components.mlps.gated_mlp_4bit import GatedMLP4Bit +from transformer_lens.components.mlps.mlp import MLP +from transformer_lens.factories.mlp_factory import MLPFactory +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def test_create_mlp_basic(): + config = HookedTransformerConfig.unwrap( + { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "act_fn": "solu", + } + ) + mlp = MLPFactory.create_mlp(config) + assert isinstance(mlp, MLP) + + +def test_create_mlp_gated(): + config = HookedTransformerConfig.unwrap( + { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "act_fn": "solu", + "gated_mlp": True, + } + ) + mlp = MLPFactory.create_mlp(config) + assert isinstance(mlp, GatedMLP) + + +@pytest.mark.skipif( + not is_bitsandbytes_available(), + reason="4 bit not available on current architecture", +) +def test_create_mlp_gated_4bit(): + config = HookedTransformerConfig.unwrap( + { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "act_fn": "solu", + "gated_mlp": True, + "load_in_4bit": True, + } + ) + mlp = MLPFactory.create_mlp(config) + assert isinstance(mlp, GatedMLP4Bit) + + +def test_create_moe(): + if is_bitsandbytes_available(): + config = HookedTransformerConfig.unwrap( + { + "n_layers": 12, + "n_ctx": 1024, + "d_head": 64, + "d_model": 128, + "act_fn": "solu", + "gated_mlp": True, + "num_experts": 32, + } + ) + mlp = MLPFactory.create_mlp(config) + assert isinstance(mlp, GatedMLP4Bit) diff --git a/tests/unit/pretrained_weight_conversions/test_neo.py b/tests/unit/pretrained_weight_conversions/test_neo.py new file mode 100644 index 0000000..c0b22b6 --- /dev/null +++ b/tests/unit/pretrained_weight_conversions/test_neo.py @@ -0,0 +1,52 @@ +from unittest import mock + +import torch + +from transformer_lens import HookedTransformer +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.pretrained.weight_conversions.neo import convert_neo_weights + + +def get_default_config(): + return HookedTransformerConfig( + d_model=128, d_head=8, n_heads=16, n_ctx=128, n_layers=1, d_vocab=50257, attn_only=True + ) + + +def test_convert_neo_weights_exposed(): + cfg = get_default_config() + + class MockNeo: + def __init__(self): + self.transformer = HookedTransformer(cfg) + self.transformer.wte = torch.nn.Embedding(cfg.d_vocab, cfg.d_model) + self.transformer.wpe = torch.nn.Embedding(cfg.n_ctx, cfg.d_model) + self.transformer.final_norm = torch.nn.LayerNorm(cfg.d_model) + self.transformer.h = [mock.Mock() for _ in range(cfg.n_layers)] + self.lm_head = torch.nn.Linear(cfg.d_model, cfg.d_vocab) + + for layer in self.transformer.h: + layer.ln_1 = torch.nn.LayerNorm(cfg.d_model) + layer.ln_2 = torch.nn.LayerNorm(cfg.d_model) + layer.attn = mock.Mock() + layer.attn.attention = mock.Mock() + layer.attn.attention.q_proj = torch.nn.Linear(cfg.d_model, cfg.d_model) + layer.attn.attention.k_proj = torch.nn.Linear(cfg.d_model, cfg.d_model) + layer.attn.attention.v_proj 
= torch.nn.Linear(cfg.d_model, cfg.d_model) + layer.attn.attention.out_proj = torch.nn.Linear(cfg.d_model, cfg.d_model) + layer.mlp = mock.Mock() + layer.mlp.c_fc = torch.nn.Linear(cfg.d_model, cfg.d_model) + layer.mlp.c_proj = torch.nn.Linear(cfg.d_model, cfg.d_model) + + self.transformer.ln_f = torch.nn.LayerNorm(cfg.d_model) + + neo = MockNeo() + + try: + convert_neo_weights(neo, cfg) + function_works = True + except Exception as e: + function_works = False + print(f"The convert_neo_weights function raised an error: {e}") + + assert function_works diff --git a/tests/unit/hooked_transformer_config/test_unwrap.py b/tests/unit/test_hooked_transformer_config.py similarity index 56% rename from tests/unit/hooked_transformer_config/test_unwrap.py rename to tests/unit/test_hooked_transformer_config.py index 4cf57c4..7ae94cf 100644 --- a/tests/unit/hooked_transformer_config/test_unwrap.py +++ b/tests/unit/test_hooked_transformer_config.py @@ -26,3 +26,31 @@ def test_hooked_transformer_config_dict(): result = HookedTransformerConfig.unwrap(hooked_transformer_config_dict) # Assert that the new returned value has been transformed into a config object assert isinstance(result, HookedTransformerConfig) + + +def test_is_layer_norm_activation_passes(): + hooked_transformer_config_dict = { + "n_layers": 2, + "d_vocab": 100, + "d_model": 6, + "n_ctx": 5, + "d_head": 2, + "attn_only": True, + "act_fn": "solu_ln", + } + config = HookedTransformerConfig.unwrap(hooked_transformer_config_dict) + assert config.is_layer_norm_activation() + + +def test_is_layer_norm_activation_fails(): + hooked_transformer_config_dict = { + "n_layers": 2, + "d_vocab": 100, + "d_model": 6, + "n_ctx": 5, + "d_head": 2, + "attn_only": True, + "act_fn": "relu", + } + config = HookedTransformerConfig.unwrap(hooked_transformer_config_dict) + assert not config.is_layer_norm_activation() diff --git a/tests/unit/test_loading_from_pretrained_utilities.py b/tests/unit/test_loading_from_pretrained_utilities.py new file mode 100644 index 0000000..de40e43 --- /dev/null +++ b/tests/unit/test_loading_from_pretrained_utilities.py @@ -0,0 +1,72 @@ +from unittest import mock + +import pytest + +from transformer_lens import HookedTransformer +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.loading_from_pretrained import fill_missing_keys + + +def get_default_config(): + return HookedTransformerConfig( + d_model=128, d_head=8, n_heads=16, n_ctx=128, n_layers=1, d_vocab=50257, attn_only=True + ) + + +# Successes + + +@mock.patch("logging.warning") +def test_fill_missing_keys(mock_warning): + cfg = get_default_config() + model = HookedTransformer(cfg) + default_state_dict = model.state_dict() + + incomplete_state_dict = {k: v for k, v in default_state_dict.items() if "W_" not in k} + + filled_state_dict = fill_missing_keys(model, incomplete_state_dict) + + assert set(filled_state_dict.keys()) == set(default_state_dict.keys()) + + # Check that warnings were issued for missing weight matrices + for key in default_state_dict: + if "W_" in key and key not in incomplete_state_dict: + mock_warning.assert_any_call( + f"Missing key for a weight matrix in pretrained, filled in with an empty tensor: {key}" + ) + + +def test_fill_missing_keys_with_hf_model_keys(): + cfg = get_default_config() + model = HookedTransformer(cfg) + default_state_dict = model.state_dict() + + incomplete_state_dict = {k: v for k, v in default_state_dict.items() if "hf_model" not in k} + + filled_state_dict = fill_missing_keys(model, 
incomplete_state_dict) + + expected_keys = set(default_state_dict.keys()) - { + k for k in default_state_dict.keys() if "hf_model" in k + } + assert set(filled_state_dict.keys()) == expected_keys + + +def test_fill_missing_keys_no_missing_keys(): + cfg = get_default_config() + model = HookedTransformer(cfg) + default_state_dict = model.state_dict() + + filled_state_dict = fill_missing_keys(model, default_state_dict) + + assert filled_state_dict == default_state_dict + + +# Failures + + +def test_fill_missing_keys_raises_error_on_invalid_model(): + invalid_model = None + default_state_dict = {} + + with pytest.raises(AttributeError): + fill_missing_keys(invalid_model, default_state_dict) diff --git a/transformer_lens/ActivationCache.py b/transformer_lens/ActivationCache.py index caff121..6fa336c 100644 --- a/transformer_lens/ActivationCache.py +++ b/transformer_lens/ActivationCache.py @@ -682,7 +682,10 @@ def stack_head_results( incl_remainder: bool = False, pos_slice: Union[Slice, SliceInput] = None, apply_ln: bool = False, - ) -> Float[torch.Tensor, "num_components *batch_and_pos_dims d_model"]: + ) -> Union[ + Float[torch.Tensor, "num_components *batch_and_pos_dims d_model"], + Tuple[Float[torch.Tensor, "num_components *batch_and_pos_dims d_model"], List[str]], + ]: """Stack Head Results. Returns a stack of all head results (ie residual stream contribution) up to layer L. A good @@ -736,7 +739,10 @@ def stack_head_results( labels.append("remainder") elif incl_remainder: # There are no components, so the remainder is the entire thing. - components = [pos_slice.apply(self[("resid_post", layer - 1)], dim=-2)] + components = torch.cat( + [pos_slice.apply(self[("resid_post", layer - 1)], dim=-2)[None]], dim=0 + ) + labels.append("remainder") else: # If this is called with layer 0, we return an empty tensor of the right shape to be # stacked correctly. This uses the shape of hook_embed, which is pretty janky since it @@ -752,7 +758,7 @@ def stack_head_results( components = self.apply_ln_to_stack(components, layer, pos_slice=pos_slice) if return_labels: - return components, labels # type: ignore # TODO: fix this properly + return components, labels else: return components @@ -896,11 +902,16 @@ def stack_neuron_results( ) if incl_remainder: - remainder = self[("resid_post", layer - 1)] - components.sum(dim=0) + remainder = pos_slice.apply( + self[("resid_post", layer - 1)], dim=-2 + ) - components.sum(dim=0) components = torch.cat([components, remainder[None]], dim=0) labels.append("remainder") elif incl_remainder: - components = [pos_slice.apply(self[("resid_post", layer - 1)], dim=-2)] + components = torch.cat( + [pos_slice.apply(self[("resid_post", layer - 1)], dim=-2)[None]], dim=0 + ) + labels.append("remainder") else: # Returning empty, give it the right shape to stack properly components = torch.zeros( @@ -937,7 +948,7 @@ def apply_ln_to_stack( element and position, which is why we need to use the cached scale factors rather than just applying a new LayerNorm. - If the model does not use LayerNorm, it returns the residual stack unchanged. + If the model does not use LayerNorm or RMSNorm, it returns the residual stack unchanged. Args: residual_stack: @@ -961,7 +972,7 @@ def apply_ln_to_stack( Whether residual_stack has a batch dimension. """ - if self.model.cfg.normalization_type not in ["LN", "LNPre"]: + if self.model.cfg.normalization_type not in ["LN", "LNPre", "RMS", "RMSPre"]: # The model does not use LayerNorm, so we don't need to do anything. 
return residual_stack + if not isinstance(pos_slice, Slice): @@ -977,8 +988,9 @@ def apply_ln_to_stack( # Apply batch slice to the stack residual_stack = batch_slice.apply(residual_stack, dim=1) -        # Center the stack -        residual_stack = residual_stack - residual_stack.mean(dim=-1, keepdim=True) +        # Center the stack only if the model uses LayerNorm +        if self.model.cfg.normalization_type in ["LN", "LNPre"]: +            residual_stack = residual_stack - residual_stack.mean(dim=-1, keepdim=True) if layer == self.model.cfg.n_layers or layer is None: scale = self["ln_final.hook_scale"] @@ -1004,7 +1016,10 @@ def get_full_resid_decomposition( apply_ln: bool = False, pos_slice: Union[Slice, SliceInput] = None, return_labels: bool = False, -    ) -> Float[torch.Tensor, "num_components *batch_and_pos_dims d_model"]: +    ) -> Union[ +        Float[torch.Tensor, "num_components *batch_and_pos_dims d_model"], +        Tuple[Float[torch.Tensor, "num_components *batch_and_pos_dims d_model"], List[str]], +    ]: """Get the full Residual Decomposition. Returns the full decomposition of the residual stream into embed, pos_embed, each head @@ -1081,6 +1096,6 @@ def get_full_resid_decomposition( ) if return_labels: -            return residual_stack, labels  # type: ignore # TODO: fix this properly +            return residual_stack, labels else: return residual_stack diff --git a/transformer_lens/HookedEncoderDecoder.py b/transformer_lens/HookedEncoderDecoder.py new file mode 100644 index 0000000..82e958a --- /dev/null +++ b/transformer_lens/HookedEncoderDecoder.py @@ -0,0 +1,416 @@ +"""Hooked EncoderDecoder + +Contains a T5 style model. This is separate from :class:`transformer_lens.HookedTransformer` +because it has a significantly different architecture to e.g. GPT style transformers. +""" + +from __future__ import annotations + +import logging +import os +from itertools import chain +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Union, cast, overload + +import torch +from einops import repeat +from jaxtyping import Float, Int +from torch import nn +from transformers import AutoTokenizer +from typing_extensions import Literal + +import transformer_lens.loading_from_pretrained as loading +from transformer_lens.ActivationCache import ActivationCache +from transformer_lens.components import Embed, RMSNorm, T5Block, Unembed +from transformer_lens.FactoredMatrix import FactoredMatrix +from transformer_lens.hook_points import HookedRootModule, HookPoint +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.utilities import devices + + +class HookedEncoderDecoder(HookedRootModule): +    """ +    This class implements a T5 encoder-decoder using the components in ./components.py, with HookPoints on every interesting activation. It inherits from HookedRootModule. + +    Limitations: +    - The model does not include dropout, which may lead to inconsistent results when training or fine-tuning. + +    Like HookedTransformer, it can have a pretrained Transformer's weights loaded via `.from_pretrained`. There are a few features you might know from HookedTransformer which are not yet supported: +        - There is no preprocessing (e.g.
LayerNorm folding) when loading a pretrained model + - The model only accepts tokens as inputs, and not strings, or lists of strings + """ + + def __init__(self, cfg, tokenizer=None, move_to_device=True, **kwargs): + super().__init__() + if isinstance(cfg, Dict): + cfg = HookedTransformerConfig(**cfg) + elif isinstance(cfg, str): + raise ValueError( + "Please pass in a config dictionary or HookedTransformerConfig object. If you want to load a pretrained model, use HookedEncoderDecoder.from_pretrained() instead." + ) + self.cfg = cfg + + if self.cfg.n_devices != 1: + raise ValueError("Multiple devices not supported for HookedEncoderDecoder") + if tokenizer is not None: + self.tokenizer = tokenizer + elif self.cfg.tokenizer_name is not None: + huggingface_token = os.environ.get("HF_TOKEN", None) + self.tokenizer = AutoTokenizer.from_pretrained( + self.cfg.tokenizer_name, + token=huggingface_token, + ) + else: + self.tokenizer = None + + if self.cfg.d_vocab == -1: + # If we have a tokenizer, vocab size can be inferred from it. + if self.tokenizer is None: + raise ValueError("Must provide a tokenizer if d_vocab is not provided") + + self.cfg.d_vocab = max(self.tokenizer.vocab.values()) + 1 + if self.cfg.d_vocab_out == -1: + self.cfg.d_vocab_out = self.cfg.d_vocab + + self.embed = Embed(self.cfg) + self.encoder = nn.ModuleList( + [ + T5Block(self.cfg, num_layer, is_decoder=False) + for num_layer in range(self.cfg.n_layers) + ] + ) + self.encoder_final_ln = RMSNorm(self.cfg) + self.decoder = nn.ModuleList( + [ + T5Block(self.cfg, num_layer, is_decoder=True) + for num_layer in range(self.cfg.n_layers) + ] + ) + self.decoder_final_ln = RMSNorm(self.cfg) + # self.lm_head = nn.Linear(self.cfg.d_model, self.cfg.d_vocab_out) + self.unembed = Unembed(self.cfg) + + self.hook_embed = HookPoint() + + if move_to_device: + self.to(self.cfg.device) + + self.setup() + + def forward( + self, + input: Int[torch.Tensor, "batch pos"], + decoder_input: Int[torch.Tensor, "batch decoder_pos"], + return_type: Optional[str] = "logits", + one_zero_attention_mask: Optional[Int[torch.Tensor, "batch pos"]] = None, + ) -> Optional[Float[torch.Tensor, "batch decoder_pos d_vocab"]]: + """Input must be a batch of tokens. Strings and lists of strings are not yet supported. + decoder_input: Int[torch.Tensor, "batch decoder_pos"]: The input to the decoder. This is the sequence of tokens that the model will generate, usually with a start token at the beginning + return_type Optional[str]: The type of output to return. Can be one of: None (return nothing, don't calculate logits), or 'logits' (return logits). + one_zero_attention_mask: Optional[torch.Tensor]: A binary mask which indicates which tokens should be attended to (1) and which should be ignored (0). Primarily used for padding variable-length sentences in a batch. For instance, in a batch with sentences of differing lengths, shorter sentences are padded with 0s on the right. If not provided, the model assumes all tokens should be attended to. 
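Editor's note: a minimal sketch of how this forward signature is typically called, mirroring the padded-batch pattern used in tests/acceptance/test_hooked_encoder_decoder.py above; the "t5-small" checkpoint and the pad token as the decoder start token are illustrative assumptions rather than requirements of the API.

    import torch
    from transformers import AutoTokenizer
    from transformer_lens import HookedEncoderDecoder

    model = HookedEncoderDecoder.from_pretrained("t5-small", device="cpu")
    tokenizer = AutoTokenizer.from_pretrained("t5-small")

    batch = tokenizer(
        ["Hello, world!", "this is another sequence of tokens"],
        return_tensors="pt",
        padding=True,
    )
    # T5 conventionally starts decoding from the pad token; this call produces logits
    # for the first decoder position only.
    decoder_input = torch.full(
        (batch["input_ids"].shape[0], 1), tokenizer.pad_token_id, dtype=torch.long
    )
    logits = model(
        batch["input_ids"],
        decoder_input=decoder_input,
        one_zero_attention_mask=batch["attention_mask"],
    )  # [batch, decoder_pos, d_vocab]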
+ """ + + tokens = input + + if tokens.device.type != self.cfg.device: + tokens = tokens.to(self.cfg.device) + if one_zero_attention_mask is not None: + one_zero_attention_mask = one_zero_attention_mask.to(self.cfg.device) + + resid = self.hook_embed(self.embed(tokens)) + + if one_zero_attention_mask is not None: + additive_attention_mask = ( + repeat(1 - one_zero_attention_mask, "batch pos -> batch 1 1 pos") + ) * torch.finfo(self.cfg.dtype).min + else: + additive_attention_mask = None + + query_len = key_len = input.shape[1] + + encoder_positional_bias = self.encoder[0].attn.compute_relative_attention_bias( + query_len, key_len, device=self.cfg.device + ) + + for encoder_block in self.encoder: + resid = encoder_block( + resid_pre=resid, + additive_attention_mask=additive_attention_mask, + position_bias=encoder_positional_bias, + ) + + encoder_resid = self.encoder_final_ln(resid) + + decoder_resid = self.embed(decoder_input) + decoder_query_len = decoder_key_len = decoder_input.shape[1] + decoder_positional_bias = self.decoder[0].attn.compute_relative_attention_bias( + decoder_query_len, decoder_key_len, device=self.cfg.device + ) + + for decoder_block in self.decoder: + decoder_resid = decoder_block( + resid_pre=decoder_resid, + position_bias=decoder_positional_bias, + encoder_hidden_states=encoder_resid, + encoder_additive_attention_mask=additive_attention_mask, + ) + + decoder_resid = self.decoder_final_ln(decoder_resid) + + if self.cfg.tie_word_embeddings: + # Rescale output before projecting on vocab + # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 + decoder_resid *= self.cfg.d_model**-0.5 + + logits = self.unembed(decoder_resid) + if return_type is None: + return None + return logits + + @overload + def run_with_cache( + self, *model_args, return_cache_object: Literal[True] = True, **kwargs + ) -> Tuple[Float[torch.Tensor, "batch pos d_vocab"], ActivationCache]: + ... + + @overload + def run_with_cache( + self, *model_args, return_cache_object: Literal[False], **kwargs + ) -> Tuple[Float[torch.Tensor, "batch pos d_vocab"], Dict[str, torch.Tensor]]: + ... + + def run_with_cache( + self, + *model_args, + return_cache_object: bool = True, + remove_batch_dim: bool = False, + **kwargs, + ) -> Tuple[ + Float[torch.Tensor, "batch pos d_vocab"], + Union[ActivationCache, Dict[str, torch.Tensor]], + ]: + """ + Wrapper around run_with_cache in HookedRootModule. If return_cache_object is True, this will return an ActivationCache object, with a bunch of useful HookedTransformer specific methods, otherwise it will return a dictionary of activations as in HookedRootModule. This function was copied directly from HookedTransformer. 
+ """ + out, cache_dict = super().run_with_cache( + *model_args, remove_batch_dim=remove_batch_dim, **kwargs + ) + if return_cache_object: + cache = ActivationCache(cache_dict, self, has_batch_dim=not remove_batch_dim) + return out, cache + else: + return out, cache_dict + + def to( # type: ignore + self, + device_or_dtype: Union[torch.device, str, torch.dtype], + print_details: bool = True, + ): + return devices.move_to_and_update_config(self, device_or_dtype, print_details) + + def cuda(self): + # Wrapper around cuda that also changes self.cfg.device + return self.to("cuda") + + def cpu(self): + # Wrapper around cuda that also changes self.cfg.device + return self.to("cpu") + + def mps(self): + # Wrapper around cuda that also changes self.cfg.device + return self.to("mps") + + @classmethod + def from_pretrained( + cls, + model_name: str, + checkpoint_index: Optional[int] = None, + checkpoint_value: Optional[int] = None, + hf_model=None, + device: Optional[str] = None, + tokenizer=None, + move_to_device=True, + dtype=torch.float32, + **from_pretrained_kwargs, + ) -> HookedEncoderDecoder: + """Loads in the pretrained weights from huggingface. Currently supports loading weight from HuggingFace BertForMaskedLM. Unlike HookedTransformer, this does not yet do any preprocessing on the model.""" + logging.warning( + "Support for T5 in TransformerLens is currently experimental, until such a time when it has feature " + "parity with HookedTransformer and has been tested on real research tasks. Until then, backward " + "compatibility is not guaranteed. Please see the docs for information on the limitations of the current " + "implementation." + "\n" + "If using T5 for interpretability research, keep in mind that T5 has some significant architectural " + "differences to GPT. 
The major one is that T5 is an Encoder-Decoder model" + "Also, it uses relative positional embeddings, different types of Attention (without bias) and LayerNorm" + ) + + if from_pretrained_kwargs.get("load_in_8bit", False) or from_pretrained_kwargs.get( + "load_in_4bit", False + ): + raise ValueError("Quantization not supported") + + if "torch_dtype" in from_pretrained_kwargs: + dtype = from_pretrained_kwargs["torch_dtype"] + + name_or_path = ( + model_name if Path(model_name).exists() else loading.get_official_model_name(model_name) + ) + + cfg = loading.get_pretrained_model_config( + name_or_path, + checkpoint_index=checkpoint_index, + checkpoint_value=checkpoint_value, + fold_ln=False, + device=device, + n_devices=1, + dtype=dtype, + **from_pretrained_kwargs, + ) + + state_dict = loading.get_pretrained_state_dict( + name_or_path, cfg, hf_model, dtype=dtype, **from_pretrained_kwargs + ) + + model = cls(cfg, tokenizer, move_to_device=False) + + model.load_state_dict(state_dict, strict=False) + + if move_to_device: + model.to(cfg.device) + + print(f"Loaded pretrained model {model_name} into HookedTransformer") + + return model + + @property + def W_U(self) -> Float[torch.Tensor, "d_model d_vocab"]: + """ + Convenience to get the unembedding matrix (ie the linear map from the final residual stream to the output logits) + """ + return self.unembed.W_U + + @property + def b_U(self) -> Float[torch.Tensor, "d_vocab"]: + """ + Convenience to get the unembedding bias + """ + return self.unembed.b_U + + @property + def W_E(self) -> Float[torch.Tensor, "d_vocab d_model"]: + """ + Convenience to get the embedding matrix + """ + return self.embed.W_E + + @property + def W_pos(self) -> None: + """ + Convenience function to get the positional embedding. Only works on models with absolute positional embeddings! + """ + raise NotImplementedError( + "T5 does not have absolute positional embeddings. Uses relative positional embeddings instead." 
+ ) + + @property + def W_K(self) -> Float[torch.Tensor, "n_layers n_heads d_model d_head"]: + """Stacks the key weights across all layers""" + return torch.stack( + [cast(T5Block, block).attn.W_K for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def W_Q(self) -> Float[torch.Tensor, "n_layers n_heads d_model d_head"]: + """Stacks the query weights across all layers""" + return torch.stack( + [cast(T5Block, block).attn.W_Q for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def W_V(self) -> Float[torch.Tensor, "n_layers n_heads d_model d_head"]: + """Stacks the value weights across all layers""" + return torch.stack( + [cast(T5Block, block).attn.W_V for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def W_O(self) -> Float[torch.Tensor, "n_layers n_heads d_head d_model"]: + """Stacks the attn output weights across all layers""" + return torch.stack( + [cast(T5Block, block).attn.W_O for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def W_in(self) -> Float[torch.Tensor, "n_layers d_model d_mlp"]: + """Stacks the MLP input weights across all layers""" + return torch.stack( + [cast(T5Block, block).mlp.W_in for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def W_out(self) -> Float[torch.Tensor, "n_layers d_mlp d_model"]: + """Stacks the MLP output weights across all layers""" + return torch.stack( + [cast(T5Block, block).mlp.W_out for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def b_K(self) -> Float[torch.Tensor, "n_layers n_heads d_head"]: + """Stacks the key biases across all layers""" + return torch.stack( + [cast(T5Block, block).attn.b_K for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def b_Q(self) -> Float[torch.Tensor, "n_layers n_heads d_head"]: + """Stacks the query biases across all layers""" + return torch.stack( + [cast(T5Block, block).attn.b_Q for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def b_V(self) -> Float[torch.Tensor, "n_layers n_heads d_head"]: + """Stacks the value biases across all layers""" + return torch.stack( + [cast(T5Block, block).attn.b_V for block in chain(self.encoder, self.decoder)], + dim=0, + ) + + @property + def b_O(self) -> Float[torch.Tensor, "n_layers d_model"]: + """Stacks the attn output biases across all layers""" + return torch.stack( + [cast(T5Block, block).attn.b_O for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def b_in(self) -> Float[torch.Tensor, "n_layers d_mlp"]: + """Stacks the MLP input biases across all layers""" + return torch.stack( + [cast(T5Block, block).mlp.b_in for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def b_out(self) -> Float[torch.Tensor, "n_layers d_model"]: + """Stacks the MLP output biases across all layers""" + return torch.stack( + [cast(T5Block, block).mlp.b_out for block in chain(self.encoder, self.decoder)], dim=0 + ) + + @property + def QK(self) -> FactoredMatrix: # [n_layers, n_heads, d_model, d_model] + """Returns a FactoredMatrix object with the product of the Q and K matrices for each layer and head. 
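+ The layer dimension stacks encoder blocks first, then decoder blocks, mirroring the stacking used for W_Q and W_K.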
+ Useful for visualizing attention patterns.""" + return FactoredMatrix(self.W_Q, self.W_K.transpose(-2, -1)) + + @property + def OV(self) -> FactoredMatrix: # [n_layers, n_heads, d_model, d_model] + """Returns a FactoredMatrix object with the product of the O and V matrices for each layer and head.""" + return FactoredMatrix(self.W_V, self.W_O) + + def all_head_labels(self) -> List[str]: + """Returns a list of strings with the format "L{l}H{h}", where l is the layer index and h is the head index.""" + return [f"EL{l}H{h}" for l in range(self.cfg.n_layers) for h in range(self.cfg.n_heads)] + [ + f"DL{l}H{h}" for l in range(self.cfg.n_layers) for h in range(self.cfg.n_heads) + ] diff --git a/transformer_lens/HookedTransformer.py b/transformer_lens/HookedTransformer.py index 68b3c4c..8ee2e74 100644 --- a/transformer_lens/HookedTransformer.py +++ b/transformer_lens/HookedTransformer.py @@ -17,6 +17,7 @@ import numpy as np import torch import torch.nn as nn +import torch.nn.functional as F import tqdm.auto as tqdm from fancy_einsum import einsum from jaxtyping import Float, Int @@ -247,6 +248,7 @@ def input_to_embed( input: Union[str, List[str], Int[torch.Tensor, "batch pos"]], prepend_bos: Optional[Union[bool, None]] = USE_DEFAULT_VALUE, padding_side: Optional[Union[Literal["left", "right"], None]] = USE_DEFAULT_VALUE, + attention_mask: Optional[torch.Tensor] = None, past_kv_cache: Optional[HookedTransformerKeyValueCache] = None, ) -> Tuple[ Float[torch.Tensor, "batch pos d_model"], # residual @@ -283,7 +285,15 @@ def input_to_embed( if tokens.device.type != self.cfg.device: tokens = tokens.to(devices.get_device_for_block_index(0, self.cfg)) - if (self.tokenizer and self.tokenizer.padding_side == "left") or past_kv_cache is not None: + if attention_mask is not None: + assert attention_mask.shape == tokens.shape, ( + f"Attention mask shape {attention_mask.shape} does not match tokens shape " + f"{tokens.shape}" + ) + attention_mask = attention_mask.to(devices.get_device_for_block_index(0, self.cfg)) + elif ( + self.tokenizer and self.tokenizer.padding_side == "left" + ) or past_kv_cache is not None: # If the padding side is left or we are using caching, we need to compute the attention # mask for the adjustment of absolute positional embeddings and attention masking so # that pad tokens are not attended. @@ -488,9 +498,10 @@ def forward( shortformer_pos_embed: Optional[Float[torch.Tensor, "batch pos d_model"]]: Positional embedding for shortformer models. Only use if start_at_layer is not None and self.cfg.positional_embedding_type == "shortformer". - attention_mask: Optional[torch.Tensor]: The attention mask for padded tokens. Only use - if start_at_layer is not None and (self.tokenizer.padding_side == "left" or - past_kv_cache is not None). + attention_mask: Optional[torch.Tensor]: Override the attention mask used to ignore + padded tokens. If start_at_layer is not None and (self.tokenizer.padding_side == + "left" or past_kv_cache is not None), this should be passed as the attention mask + is not computed automatically. Defaults to None. stop_at_layer Optional[int]: If not None, stop the forward pass at the specified layer. Exclusive - ie, stop_at_layer = 0 will only run the embedding layer, stop_at_layer = 1 will run the embedding layer and the first transformer block, etc. 
Supports @@ -522,6 +533,7 @@ def forward( input, prepend_bos=prepend_bos, padding_side=padding_side, + attention_mask=attention_mask, past_kv_cache=past_kv_cache, ) else: @@ -565,13 +577,17 @@ def forward( return None else: logits = self.unembed(residual) # [batch, pos, d_vocab] + if self.cfg.output_logits_soft_cap > 0.0: + logits = self.cfg.output_logits_soft_cap * F.tanh( + logits / self.cfg.output_logits_soft_cap + ) if return_type == "logits": return logits else: assert ( tokens is not None ), "tokens must be passed in if return_type is 'loss' or 'both'" - loss = self.loss_fn(logits, tokens, per_token=loss_per_token) + loss = self.loss_fn(logits, tokens, attention_mask, per_token=loss_per_token) if return_type == "loss": return loss elif return_type == "both": @@ -584,6 +600,7 @@ def loss_fn( self, logits: Float[torch.Tensor, "batch pos d_vocab"], tokens: Int[torch.Tensor, "batch pos"], + attention_mask: Optional[Int[torch.Tensor, "batch pos"]] = None, per_token: bool = False, ): """Wrapper around `utils.lm_cross_entropy_loss`. @@ -592,7 +609,7 @@ def loss_fn( """ if tokens.device != logits.device: tokens = tokens.to(logits.device) - return utils.lm_cross_entropy_loss(logits, tokens, per_token) + return utils.lm_cross_entropy_loss(logits, tokens, attention_mask, per_token) @overload def run_with_cache( @@ -1028,6 +1045,7 @@ def move_model_modules_to_device(self): if self.cfg.positional_embedding_type != "rotary": self.pos_embed.to(devices.get_device_for_block_index(0, self.cfg)) self.hook_pos_embed.to(devices.get_device_for_block_index(0, self.cfg)) + if hasattr(self, "ln_final"): self.ln_final.to(devices.get_device_for_block_index(self.cfg.n_layers - 1, self.cfg)) self.unembed.to(devices.get_device_for_block_index(self.cfg.n_layers - 1, self.cfg)) @@ -1266,6 +1284,12 @@ def from_pretrained( "Setting center_writing_weights=False instead." ) center_writing_weights = False + if center_unembed and cfg.output_logits_soft_cap > 0.0: + logging.warning( + "You tried to specify center_unembed=True for a model using logit softcap, but this can't be done! Softcapping is not invariant upon adding a constant" + "Setting center_unembed=False instead." + ) + center_unembed = False # Get the state dict of the model (ie a mapping of parameter names to tensors), processed to # match the HookedTransformer parameter names. @@ -1938,7 +1962,7 @@ def process_weights_( for layer in self.blocks: layer.ln1 = LayerNormPre(self.cfg) layer.ln2 = LayerNormPre(self.cfg) - if self.cfg.act_fn is not None and self.cfg.act_fn.endswith("_ln"): + if self.cfg.is_layer_norm_activation(): layer.mlp.ln = LayerNormPre(self.cfg) elif fold_ln and self.cfg.normalization_type == "RMS": # We do the same for RMSNorm if used @@ -1947,7 +1971,7 @@ def process_weights_( for layer in self.blocks: layer.ln1 = RMSNormPre(self.cfg) layer.ln2 = RMSNormPre(self.cfg) - if self.cfg.act_fn is not None and self.cfg.act_fn.endswith("_ln"): + if self.cfg.is_layer_norm_activation(): layer.mlp.ln = RMSNormPre(self.cfg) self.load_and_process_state_dict( @@ -2281,7 +2305,7 @@ def OV(self): # Various utility functions def accumulated_bias( self, layer: int, mlp_input: bool = False, include_mlp_biases=True - ) -> Float[torch.Tensor, "layers_accumulated_over d_model"]: + ) -> Float[torch.Tensor, "d_model"]: """Accumulated Bias. 
Returns the accumulated bias from all layer outputs (ie the b_Os and b_outs), up to the diff --git a/transformer_lens/HookedTransformerConfig.py b/transformer_lens/HookedTransformerConfig.py index 1e1e595..6906de3 100644 --- a/transformer_lens/HookedTransformerConfig.py +++ b/transformer_lens/HookedTransformerConfig.py @@ -16,8 +16,7 @@ import torch from transformer_lens import utils - -SUPPORTED_ACTIVATIONS = ["relu", "gelu", "silu", "gelu_new", "solu_ln", "gelu_fast"] +from transformer_lens.utilities.activation_functions import SUPPORTED_ACTIVATIONS @dataclass @@ -55,6 +54,8 @@ class HookedTransformerConfig: attention head separately, with a hook. Defaults to false to save memory use_attn_scale (bool): whether to scale the attention weights by 1/sqrt(d_head) + attn_scale (float): The amount to divide attention scores by (if applicable). Defaults to + sqrt(d_head) model_name (str): the name of the model, used to load weights from HuggingFace or initialized to "custom" if not passed original_architecture (str, *optional*): the family of the model, used @@ -83,7 +84,8 @@ class HookedTransformerConfig: 'kaiming_normal'. MuP support to come. Defaults to 'gpt2'. normalization_type (str, *optional*): the type of normalization to use. Options are None (no normalization), 'LN' (use LayerNorm, including weights - & biases) and 'LNPre' (use LayerNorm, but no weights & biases). + & biases) and 'LNPre' (use LayerNorm, but no weights or biases), 'RMS' + (use RMSNorm, including weights) and 'RMSPre' (use RMSNorm, but no weights or biases). Defaults to LN device(str): The device to use for the model. Defaults to 'cuda' if available, else 'cpu'. Must be 'cuda' if `n_devices` > 1. @@ -128,7 +130,8 @@ class HookedTransformerConfig: rotary_dim (int, *optional*): The dimensionality of the rotary embeddings, may be d_head in which case only the first rotary_dim dimensions of each head are rotated. Defaults to None, if - positional_embedding_type=="rotary" it defaults to d_head. + positional_embedding_type=="rotary" post-init then sets it to d_head, i.e. "rotate all + dimensions of the query and key". n_params (int, *optional*): The number of (hidden weight) parameters in the model. This is automatically calculated and not intended to be set by the user. (Non embedding parameters, because @@ -159,6 +162,24 @@ class HookedTransformerConfig: must also be set. Set to None if not using MoE. experts_per_token (int, *optional*): The number of experts to use for each pass in the MoE layer. If set, num_experts must also be set. Set to None if not using MoE. + relative_attention_max_distance (int, *optional*): The maximum distance between tokens for relative + attention. If set, relative_attention_num_buckets must also be set.Only used in EncoderDecoder models, like T5. + relative_attention_num_buckets (int, *optional*): The number of buckets to use for relative attention. + If set, relative_attention_max_distance must also be set.Only used in EncoderDecoder models, like T5. + decoder_start_token_id (int, *optional*): The start token id for the decoder. Only used in EncoderDecoder models, like T5. + tie_word_embeddings (bool): Whether to tie the word embeddings and the output layer weights. Defaults to False. Only used in EncoderDecoder (T5) by now. + use_normalization_before_and_after (bool): Whether to apply normalization (LN/RMS/etc) + to both the input of an attn/MLP block *and* the output (before adding back to the + residual stream). Currently only used in Gemma-2. Defaults to False. 
+ attn_scores_soft_cap (float): An optional softcap for attention scores pre-softmax. If + used, it will map attn_scores -> soft_cap * tanh(attn_scores / soft_cap). As tanh's + output is in [-1, 1], this maps attn_scores to [-soft_cap, soft_cap], with little + effect on small values, but squashing large values into that interval. Currently only + used in Gemma-2. Defaults to -1.0, which means not set. + output_logits_soft_cap (float): An optional softcap for output logits, currently only used + in Gemma-2 (see attn_scores_soft_cap for details). Defaults to -1.0, which means not + set. + """ n_layers: int @@ -173,6 +194,7 @@ class HookedTransformerConfig: eps: float = 1e-5 use_attn_result: bool = False use_attn_scale: bool = True + attn_scale: float = -1.0 use_split_qkv_input: bool = False use_hook_mlp_in: bool = False use_attn_in: bool = False @@ -214,6 +236,13 @@ class HookedTransformerConfig: load_in_4bit: bool = False num_experts: Optional[int] = None experts_per_token: Optional[int] = None + relative_attention_max_distance: Optional[int] = None + relative_attention_num_buckets: Optional[int] = None + decoder_start_token_id: Optional[int] = None + tie_word_embeddings: bool = False + use_normalization_before_and_after: bool = False + attn_scores_soft_cap: float = -1.0 + output_logits_soft_cap: float = -1.0 def __post_init__(self): if self.n_heads == -1: @@ -286,6 +315,9 @@ def __post_init__(self): torch.cuda.device_count() >= self.n_devices ), f"Not enough CUDA devices to support n_devices {self.n_devices}" + if self.use_attn_scale and self.attn_scale == -1.0: + self.attn_scale = np.sqrt(self.d_head) + assert self.default_prepend_bos in [ True, False, @@ -316,3 +348,6 @@ def set_seed_everywhere(self, seed: int): torch.manual_seed(seed) random.seed(seed) np.random.seed(seed) + + def is_layer_norm_activation(self) -> bool: + return self.act_fn is not None and self.act_fn.endswith("_ln") diff --git a/transformer_lens/__init__.py b/transformer_lens/__init__.py index e2fb148..0ed2635 100644 --- a/transformer_lens/__init__.py +++ b/transformer_lens/__init__.py @@ -6,12 +6,14 @@ HookedTransformerKeyValueCacheEntry, ) from . import components +from . import factories from .HookedTransformerConfig import HookedTransformerConfig from .FactoredMatrix import FactoredMatrix from .ActivationCache import ActivationCache from .HookedTransformer import HookedTransformer from .SVDInterpreter import SVDInterpreter from .HookedEncoder import HookedEncoder +from .HookedEncoderDecoder import HookedEncoderDecoder from . import head_detector from . import loading_from_pretrained as loading from . import patching diff --git a/transformer_lens/components/__init__.py b/transformer_lens/components/__init__.py index 4767742..3b908fe 100644 --- a/transformer_lens/components/__init__.py +++ b/transformer_lens/components/__init__.py @@ -4,6 +4,7 @@ needed to create many different types of generative language models. They are used by :class:`transformer_lens.HookedTransformer`. 
""" + # Independent classes from .abstract_attention import AbstractAttention from .layer_norm import LayerNorm @@ -18,12 +19,14 @@ from .attention import Attention from .bert_mlm_head import BertMLMHead from .embed import Embed -from .gated_mlp import GatedMLP from .grouped_query_attention import GroupedQueryAttention -from .mlp import MLP +from .mlps.gated_mlp import GatedMLP +from .mlps.mlp import MLP # Interdependent modules from .bert_block import BertBlock from .bert_embed import BertEmbed -from .moe import MoE +from .mlps.moe import MoE from .transformer_block import TransformerBlock +from .t5_attention import T5Attention +from .t5_block import T5Block diff --git a/transformer_lens/components/abstract_attention.py b/transformer_lens/components/abstract_attention.py index cc22519..3146de0 100644 --- a/transformer_lens/components/abstract_attention.py +++ b/transformer_lens/components/abstract_attention.py @@ -2,12 +2,10 @@ from typing import Dict, Optional, Tuple, Union import einops -import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from better_abc import abstract_attribute -from fancy_einsum import einsum from jaxtyping import Float, Int from transformers.utils import is_bitsandbytes_available @@ -15,6 +13,7 @@ from transformer_lens.hook_points import HookPoint from transformer_lens.HookedTransformerConfig import HookedTransformerConfig from transformer_lens.past_key_value_caching import HookedTransformerKeyValueCacheEntry +from transformer_lens.utilities.attention import complex_attn_linear, simple_attn_linear from transformer_lens.utils import get_offset_position_ids if is_bitsandbytes_available(): @@ -46,18 +45,24 @@ def __init__( self.cfg = HookedTransformerConfig.unwrap(cfg) if self.cfg.load_in_4bit: - nq = int((self.cfg.d_model * self.cfg.d_model) / 2) + nq = int((self.cfg.d_model * self.cfg.d_head * self.cfg.n_heads) / 2) self.W_Q = Params4bit(torch.empty(nq, 1, dtype=torch.uint8), requires_grad=False) self.W_O = Params4bit(torch.empty(nq, 1, dtype=torch.uint8), requires_grad=False) else: self.W_Q = nn.Parameter( torch.empty( - self.cfg.n_heads, self.cfg.d_model, self.cfg.d_head, dtype=self.cfg.dtype + self.cfg.n_heads, + self.cfg.d_model, + self.cfg.d_head, + dtype=self.cfg.dtype, ) ) self.W_O = nn.Parameter( torch.empty( - self.cfg.n_heads, self.cfg.d_head, self.cfg.d_model, dtype=self.cfg.dtype + self.cfg.n_heads, + self.cfg.d_head, + self.cfg.d_model, + dtype=self.cfg.dtype, ) ) self.W_K = abstract_attribute() @@ -79,7 +84,8 @@ def __init__( self.register_buffer("mask", causal_mask) elif self.attn_type == "local": # For local, this is banded, query - window_size < key <= query - assert isinstance(self.cfg.window_size, int) + if not isinstance(self.cfg.window_size, int): + raise ValueError("Window size must be an integer for local attention") self.register_buffer("mask", torch.triu(causal_mask, 1 - self.cfg.window_size)) else: raise ValueError(f"Invalid attention type: {self.attn_type}") @@ -90,11 +96,12 @@ def __init__( # attn_scale is a constant that we divide the attention scores by pre-softmax. I'm not entirely sure why it matters, but it's probably a mix of softmax not being scale invariant and numerical stability? 
if self.cfg.use_attn_scale: - self.attn_scale = np.sqrt(self.cfg.d_head) + self.attn_scale = self.cfg.attn_scale # Defaults to sqrt(d_head) else: self.attn_scale = 1.0 if self.cfg.scale_attn_by_inverse_layer_idx: - assert self.layer_id is not None # keep mypy happy + if self.layer_id is None: # keep mypy happy + raise ValueError("Layer ID must be provided to scale attention scores") self.attn_scale *= self.layer_id + 1 self.hook_k = HookPoint() # [batch, pos, head_index, d_head] @@ -113,7 +120,8 @@ def __init__( # Applies a rotation to each two-element chunk of keys and queries pre dot producting to bake in relative position. See HookedTransformerConfig for details self.hook_rot_k = HookPoint() self.hook_rot_q = HookPoint() - assert self.cfg.rotary_dim is not None # keep mypy happy + if self.cfg.rotary_dim is None: # keep mypy happy + raise ValueError("Rotary dim must be provided for rotary positional embeddings") sin, cos = self.calculate_sin_cos_rotary( self.cfg.rotary_dim, self.cfg.n_ctx, @@ -127,6 +135,10 @@ def __init__( # Note: While computationally efficient, initializing an bias with max n_ctx (16, 1024, 1024) of float32 will occupy ~256MiB of contiguous GPU memory, which may not be optimal for memory usage. self.alibi = None + elif self.cfg.positional_embedding_type == "relative_positional_bias": + # will be overwritten by the child T5Attention class + self.has_relative_attention_bias = False + @property def OV(self) -> FactoredMatrix: """ @@ -159,18 +171,19 @@ def forward( Float[torch.Tensor, "batch pos head_index d_model"], ], key_input: Union[ - Float[torch.Tensor, "batch pos d_model"], - Float[torch.Tensor, "batch pos head_index d_model"], - Float[torch.Tensor, "batch pos kv_head_index d_model"], + Float[torch.Tensor, "batch kv_pos d_model"], + Float[torch.Tensor, "batch kv_pos head_index d_model"], + Float[torch.Tensor, "batch kv_pos kv_head_index d_model"], ], value_input: Union[ - Float[torch.Tensor, "batch pos d_model"], - Float[torch.Tensor, "batch pos head_index d_model"], - Float[torch.Tensor, "batch pos kv_head_index d_model"], + Float[torch.Tensor, "batch kv_pos d_model"], + Float[torch.Tensor, "batch kv_pos head_index d_model"], + Float[torch.Tensor, "batch kv_pos kv_head_index d_model"], ], past_kv_cache_entry: Optional[HookedTransformerKeyValueCacheEntry] = None, - additive_attention_mask: Optional[Float[torch.Tensor, "batch 1 1 pos"]] = None, + additive_attention_mask: Optional[Float[torch.Tensor, "batch 1 1 kv_pos"]] = None, attention_mask: Optional[Int[torch.Tensor, "batch offset_pos"]] = None, + position_bias: Optional[Float[torch.Tensor, "1 head_index pos kv_pos"]] = None, ) -> Float[torch.Tensor, "batch pos d_model"]: """ shortformer_pos_embed is only used if self.cfg.positional_embedding_type == "shortformer", else defaults to None and is irrelevant. See HookedTransformerConfig for more details @@ -218,7 +231,20 @@ def forward( attn_scores += self.alibi[ :, :query_ctx, :key_ctx ] # [batch, head_index, query_pos, key_pos] + elif self.cfg.positional_embedding_type == "relative_positional_bias": + if position_bias is None: + if self.has_relative_attention_bias: + raise ValueError("Positional bias is required for relative_positional_bias") + else: + position_bias = torch.zeros( + 1, + self.cfg.n_heads, + attn_scores.shape[2], + attn_scores.shape[3], + device=attn_scores.device, + ) + attn_scores += position_bias if self.cfg.attention_dir == "causal": # If causal attention, we mask it to only attend backwards. If bidirectional, we don't mask. 
attn_scores = self.apply_causal_mask( @@ -237,47 +263,47 @@ def forward( if not self.cfg.use_attn_result: if self.cfg.load_in_4bit: # call bitsandbytes method to dequantize and multiply - out = bnb.matmul_4bit( - z.reshape(z.shape[0], z.shape[1], self.cfg.d_model), - self.W_O.t(), - # bias=self.W_O.t(), - bias=None, - quant_state=self.W_O.quant_state, - ) - +self.b_O - else: out = ( - ( - einsum( - "batch pos head_index d_head, \ - head_index d_head d_model -> \ - batch pos d_model", - z, - self.W_O, - ) + bnb.matmul_4bit( + z.reshape(z.shape[0], z.shape[1], self.cfg.d_head * self.cfg.n_heads), + self.W_O.t(), + # bias=self.W_O.t(), + bias=None, + quant_state=self.W_O.quant_state, ) + self.b_O - ) # [batch, pos, d_model] + ) + else: + w = einops.rearrange( + self.W_O, "head_index d_head d_model -> d_model (head_index d_head)" + ) + out = F.linear( + z.reshape(z.shape[0], z.shape[1], self.cfg.d_head * self.cfg.n_heads), + w, + self.b_O, + ) else: # Explicitly calculate the attention result so it can be accessed by a hook # This is off by default because it can easily eat through your GPU memory. if self.cfg.load_in_4bit: result = self.hook_result( bnb.matmul_4bit( - z.reshape(z.shape[0], z.shape[1], self.cfg.d_model), + z.reshape(z.shape[0], z.shape[1], self.cfg.d_head * self.cfg.n_heads), self.W_O.t(), bias=None, quant_state=self.W_O.quant_state, ) ) else: + w = einops.rearrange( + self.W_O, + "head_index d_head d_model -> d_model head_index d_head", + ) result = self.hook_result( - einsum( - "batch pos head_index d_head, \ - head_index d_head d_model -> \ - batch pos head_index d_model", + einops.einsum( z, - self.W_O, + w, + "... head_index d_head, d_model head_index d_head -> ... head_index d_model", ) ) # [batch, pos, head_index, d_model] out = ( @@ -293,23 +319,23 @@ def calculate_qkv_matrices( Float[torch.Tensor, "batch pos head_index d_model"], ], key_input: Union[ - Float[torch.Tensor, "batch pos d_model"], - Float[torch.Tensor, "batch pos head_index d_model"], + Float[torch.Tensor, "batch kv_pos d_model"], + Float[torch.Tensor, "batch kv_pos head_index d_model"], ], value_input: Union[ - Float[torch.Tensor, "batch pos d_model"], - Float[torch.Tensor, "batch pos head_index d_model"], + Float[torch.Tensor, "batch kv_pos d_model"], + Float[torch.Tensor, "batch kv_pos head_index d_model"], ], ) -> Tuple[ Float[torch.Tensor, "batch pos head_index d_head"], - Float[torch.Tensor, "batch pos head_index d_head"], - Float[torch.Tensor, "batch pos head_index d_head"], + Float[torch.Tensor, "batch kv_pos head_index d_head"], + Float[torch.Tensor, "batch kv_pos head_index d_head"], ]: - if self.cfg.use_split_qkv_input or self.cfg.use_attn_in: - qkv_einops_string = "batch pos head_index d_model" - else: - qkv_einops_string = "batch pos d_model" - + attn_fn = ( + complex_attn_linear + if self.cfg.use_split_qkv_input or self.cfg.use_attn_in + else simple_attn_linear + ) if self.cfg.load_in_4bit: q = self.hook_q( # call bitsandbytes method to dequantize and multiply @@ -327,17 +353,10 @@ def calculate_qkv_matrices( + self.b_Q ) else: - q = self.hook_q( - einsum( - f"{qkv_einops_string}, head_index d_model d_head \ - -> batch pos head_index d_head", - query_input, - self.W_Q, - ) - + self.b_Q - ) # [batch, pos, head_index, d_head] + q = self.hook_q(attn_fn(query_input, self.W_Q, self.b_Q)) if self.cfg.load_in_4bit: - assert isinstance(self.W_K, Params4bit) + if not isinstance(self.W_K, Params4bit): + raise ValueError("W_K must be a Params4bit object if load_in_4bit is True") k = self.hook_k( # call 
bitsandbytes method to dequantize and multiply bnb.matmul_4bit( @@ -351,18 +370,11 @@ def calculate_qkv_matrices( + self.b_K ) else: - k = self.hook_k( - einsum( - f"{qkv_einops_string}, head_index d_model d_head \ - -> batch pos head_index d_head", - key_input, - self.W_K, - ) - + self.b_K - ) # [batch, pos, head_index, d_head] + k = self.hook_k(attn_fn(key_input, self.W_K, self.b_K)) if self.cfg.load_in_4bit: - assert isinstance(self.W_V, Params4bit) + if not isinstance(self.W_V, Params4bit): + raise ValueError("W_V must be a Params4bit object if load_in_4bit is True") v = self.hook_v( # call bitsandbytes method to dequantize and multiply bnb.matmul_4bit( @@ -379,15 +391,8 @@ def calculate_qkv_matrices( + self.b_V ) else: - v = self.hook_v( - einsum( - f"{qkv_einops_string}, head_index d_model d_head \ - -> batch pos head_index d_head", - value_input, - self.W_V, - ) - + self.b_V - ) # [batch, pos, head_index, d_head] + v = self.hook_v(attn_fn(value_input, self.W_V, self.b_V)) + return q, k, v def calculate_attention_scores( @@ -395,16 +400,17 @@ def calculate_attention_scores( q: Float[torch.Tensor, "batch query_pos head_index d_head"], k: Float[torch.Tensor, "batch key_pos head_index d_head"], ) -> Float[torch.Tensor, "batch head_index query_pos key_pos"]: - attn_scores = ( - einsum( - "batch query_pos head_index d_head, \ - batch key_pos head_index d_head \ - -> batch head_index query_pos key_pos", - q, - k, - ) - / self.attn_scale + q_ = einops.rearrange( + q, "batch query_pos head_index d_head -> batch head_index query_pos d_head" + ) + k_ = einops.rearrange( + k, "batch key_pos head_index d_head -> batch head_index d_head key_pos" ) + attn_scores = q_ @ k_ / self.attn_scale + if self.cfg.attn_scores_soft_cap > 0: + attn_scores = self.cfg.attn_scores_soft_cap * F.tanh( + attn_scores / self.cfg.attn_scores_soft_cap + ) return attn_scores def calculate_z_scores( @@ -412,13 +418,17 @@ def calculate_z_scores( v: Float[torch.Tensor, "batch key_pos head_index d_head"], pattern: Float[torch.Tensor, "batch head_index query_pos key_pos"], ) -> Float[torch.Tensor, "batch query_pos head_index d_head"]: + v_ = einops.rearrange( + v, "batch key_pos head_index d_head -> batch head_index key_pos d_head" + ) + pattern_ = einops.rearrange( + pattern, + "batch head_index query_pos key_pos -> batch head_index query_pos key_pos", + ) z = self.hook_z( - einsum( - "batch key_pos head_index d_head, \ - batch head_index query_pos key_pos -> \ - batch query_pos head_index d_head", - v, - pattern, + einops.rearrange( + pattern_ @ v_, + "batch head_index query_pos d_head -> batch query_pos head_index d_head", ) ) return z @@ -435,9 +445,10 @@ def apply_causal_mask( # If not caching, query_ctx_length == key_ctx_length key_ctx_length = attn_scores.size(-1) - assert ( - query_ctx_length + past_kv_pos_offset == key_ctx_length - ), f"query_ctx_length {query_ctx_length} + past_kv_pos_offset {past_kv_pos_offset} != key_ctx_length {key_ctx_length} - you likely have a bug." + if query_ctx_length + past_kv_pos_offset != key_ctx_length: + raise ValueError( + f"query_ctx_length {query_ctx_length} + past_kv_pos_offset {past_kv_pos_offset} != key_ctx_length {key_ctx_length} - you likely have a bug." 
+ ) # Index back to front to ensure local attention works final_mask = self.mask[None, None, -query_ctx_length:, -key_ctx_length:] # [1, 1, pos, pos] diff --git a/transformer_lens/components/bert_block.py b/transformer_lens/components/bert_block.py index 3740d91..ba6954b 100644 --- a/transformer_lens/components/bert_block.py +++ b/transformer_lens/components/bert_block.py @@ -8,7 +8,8 @@ import torch.nn as nn from jaxtyping import Float -from transformer_lens.components import MLP, Attention, LayerNorm +from transformer_lens.components import Attention, LayerNorm +from transformer_lens.factories.mlp_factory import MLPFactory from transformer_lens.hook_points import HookPoint from transformer_lens.HookedTransformerConfig import HookedTransformerConfig from transformer_lens.utils import repeat_along_head_dimension @@ -25,7 +26,7 @@ def __init__(self, cfg: HookedTransformerConfig): self.attn = Attention(cfg) self.ln1 = LayerNorm(cfg) - self.mlp = MLP(cfg) + self.mlp = MLPFactory.create_mlp(self.cfg) self.ln2 = LayerNorm(cfg) self.hook_q_input = HookPoint() # [batch, pos, n_heads, d_model] diff --git a/transformer_lens/components/bert_embed.py b/transformer_lens/components/bert_embed.py index 058637a..2c47f79 100644 --- a/transformer_lens/components/bert_embed.py +++ b/transformer_lens/components/bert_embed.py @@ -22,10 +22,10 @@ class BertEmbed(nn.Module): def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): super().__init__() self.cfg = HookedTransformerConfig.unwrap(cfg) - self.embed = Embed(cfg) - self.pos_embed = PosEmbed(cfg) - self.token_type_embed = TokenTypeEmbed(cfg) - self.ln = LayerNorm(cfg) + self.embed = Embed(self.cfg) + self.pos_embed = PosEmbed(self.cfg) + self.token_type_embed = TokenTypeEmbed(self.cfg) + self.ln = LayerNorm(self.cfg) self.hook_embed = HookPoint() self.hook_pos_embed = HookPoint() diff --git a/transformer_lens/components/embed.py b/transformer_lens/components/embed.py index 74c048a..97bec7f 100644 --- a/transformer_lens/components/embed.py +++ b/transformer_lens/components/embed.py @@ -1,6 +1,6 @@ """Hooked Transformer Embed Component. -This module contains all the component :class:`BertMLMHead`. +This module contains all the component :class:`Embed`. """ from typing import Dict, Union diff --git a/transformer_lens/components/gated_mlp.py b/transformer_lens/components/gated_mlp.py deleted file mode 100644 index 70a33b2..0000000 --- a/transformer_lens/components/gated_mlp.py +++ /dev/null @@ -1,143 +0,0 @@ -"""Hooked Transformer Gated MLP Component. - -This module contains all the component :class:`GatedMLP`. 
-""" -from typing import Callable, Dict, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fancy_einsum import einsum -from jaxtyping import Float -from transformers.utils import is_bitsandbytes_available - -from transformer_lens.components import LayerNorm, LayerNormPre -from transformer_lens.hook_points import HookPoint -from transformer_lens.HookedTransformerConfig import HookedTransformerConfig -from transformer_lens.utils import gelu_fast, gelu_new, solu - -if is_bitsandbytes_available(): - import bitsandbytes as bnb - from bitsandbytes.nn.modules import Params4bit - - -# TODO -# not sure whether to fold this into MLP or not -class GatedMLP(nn.Module): - """ - The equation of a gated MLP: - pre = x @ W_gate - pre_linear = x @ W_in - post = Gelu(pre) * (pre_linear) + b_in - mlp_out = post @ W_out + b_out - - In one equation, mlp_out = (Gelu(x @ W_gate) * (x @ W_in) + b_in) @ W_out + b_out - """ - - act_fn: Callable[..., torch.Tensor] - ln: nn.Module - - def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): - super().__init__() - self.cfg = HookedTransformerConfig.unwrap(cfg) - assert self.cfg.d_mlp is not None # keep mypy happy - - if self.cfg.load_in_4bit: - nq = int((self.cfg.d_model * self.cfg.d_mlp) / 2) - self.W_in = Params4bit(torch.empty(nq, 1, dtype=torch.uint8), requires_grad=False) - self.W_gate = Params4bit(torch.empty(nq, 1, dtype=torch.uint8), requires_grad=False) - self.W_out = Params4bit(torch.empty(nq, 1, dtype=torch.uint8), requires_grad=False) - else: - self.W_in = nn.Parameter( - torch.empty(self.cfg.d_model, self.cfg.d_mlp, dtype=self.cfg.dtype) - ) - self.W_gate = nn.Parameter( - torch.empty(self.cfg.d_model, self.cfg.d_mlp, dtype=self.cfg.dtype) - ) - self.W_out = nn.Parameter( - torch.empty(self.cfg.d_mlp, self.cfg.d_model, dtype=self.cfg.dtype) - ) - - self.b_in = nn.Parameter(torch.zeros(self.cfg.d_mlp, dtype=self.cfg.dtype)) - self.b_out = nn.Parameter(torch.zeros(self.cfg.d_model, dtype=self.cfg.dtype)) - - # hook on gate output but before act_fn - self.hook_pre = HookPoint() # [batch, pos, d_mlp] - # hook on the linear component of the input - self.hook_pre_linear = HookPoint() # [batch, pos, d_mlp] - # hook on act_fn(gate_output) * W_in(x) + b_in - self.hook_post = HookPoint() # [batch, pos, d_mlp] - - if self.cfg.act_fn == "relu": - self.act_fn = F.relu - elif self.cfg.act_fn == "gelu": - self.act_fn = F.gelu - elif self.cfg.act_fn == "silu": - self.act_fn = F.silu - elif self.cfg.act_fn == "gelu_new": - self.act_fn = gelu_new - elif self.cfg.act_fn == "gelu_fast": - self.act_fn = gelu_fast - elif self.cfg.act_fn == "solu_ln": - self.act_fn = solu - # Hook taken between activation and layer norm - self.hook_mid = HookPoint() # [batch, pos, d_mlp] - if self.cfg.normalization_type == "LN": - self.ln = LayerNorm(self.cfg, self.cfg.d_mlp) - else: - self.ln = LayerNormPre(self.cfg) - - else: - raise ValueError(f"Invalid activation function name: {self.cfg.act_fn}") - - def forward( - self, x: Float[torch.Tensor, "batch pos d_model"] - ) -> Float[torch.Tensor, "batch pos d_model"]: - # Technically, all these einsums could be done with a single matmul, but this is more readable. 
- if self.cfg.load_in_4bit: - pre_act = self.hook_pre( - bnb.matmul_4bit(x, self.W_gate.t(), bias=None, quant_state=self.W_gate.quant_state) - ) - else: - pre_act = self.hook_pre( - einsum( - "batch pos d_model, d_model d_mlp -> batch pos d_mlp", - x, - self.W_gate, - ) - ) # [batch, pos, d_mlp] - - if self.cfg.act_fn is not None and not self.cfg.act_fn.endswith("_ln"): - if self.cfg.load_in_4bit: - pre_linear = self.hook_pre_linear( - bnb.matmul_4bit(x, self.W_in.t(), bias=None, quant_state=self.W_in.quant_state) - ) - else: - pre_linear = self.hook_pre_linear( - einsum( - "batch pos d_model, d_model d_mlp -> batch pos d_mlp", - x, - self.W_in, - ) - ) - - post_act = self.hook_post( - (self.act_fn(pre_act) * pre_linear) + self.b_in - ) # [batch, pos, d_mlp] - else: - mid_act = self.hook_mid(self.act_fn(pre_act)) # [batch, pos, d_mlp] - post_act = self.hook_post(self.ln(mid_act)) - - if self.cfg.load_in_4bit: - return bnb.matmul_4bit( - post_act, self.W_out.t(), bias=None, quant_state=self.W_out.quant_state - ) - else: - return ( - einsum( - "batch pos d_mlp, d_mlp d_model -> batch pos d_model", - post_act, - self.W_out, - ) - + self.b_out - ) diff --git a/transformer_lens/components/grouped_query_attention.py b/transformer_lens/components/grouped_query_attention.py index 45067e7..0681518 100644 --- a/transformer_lens/components/grouped_query_attention.py +++ b/transformer_lens/components/grouped_query_attention.py @@ -2,11 +2,11 @@ import torch import torch.nn as nn -from fancy_einsum import einsum from jaxtyping import Float from transformer_lens.components import AbstractAttention from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.utilities.attention import complex_attn_linear, simple_attn_linear class GroupedQueryAttention(AbstractAttention): @@ -17,7 +17,7 @@ def __init__( layer_id: Union[int, None] = None, ): """Grouped Query Attention Block - see https://arxiv.org/abs/2305.13245 for details. - Similar to regular attention, W_Q, W_K, and W_V all have shape [head_index, d_model, d_head] and W_Q has shape [head_index, d_head, d_model]. + Similar to regular attention, W_Q, W_K, and W_V all have shape [head_index, d_model, d_head]. However, under the hood the key and value weights _W_K and _W_V are stored with shape [n_key_value_heads, d_model, d_head] and are expanded when the corresponding properties' getter is called. Similarly, during a forward pass, initially K and V are kept in shapes [batch, pos, n_key_value_heads, d_head] and will only be expanded to shapes [batch, pos, n_heads, d_head] using torch.repeat_interleave when the attention pattern and z-scores are calculated. @@ -117,38 +117,20 @@ def calculate_qkv_matrices( Tuple[Float[torch.Tensor, "batch pos head_index d_head"], Float[torch.Tensor, "batch pos kv_head_index d_head"], Float[torch.Tensor, "batch pos kv_head_index d_head"]]: A tuple containing the Q, K, and V matrices with the specified shapes. 
""" - if self.cfg.use_split_qkv_input or self.cfg.use_attn_in: - kv_einops_string = "batch pos kv_head_index d_model" - q_einops_string = "batch pos head_index d_model" - else: - kv_einops_string = q_einops_string = "batch pos d_model" + attn_fn = ( + complex_attn_linear + if self.cfg.use_split_qkv_input or self.cfg.use_attn_in + else simple_attn_linear + ) q = self.hook_q( - einsum( - f"{q_einops_string}, head_index d_model d_head \ - -> batch pos head_index d_head", - query_input, - self.W_Q, - ) - + self.b_Q + attn_fn(query_input, self.W_Q, self.b_Q) ) # [batch, pos, head_index, d_head] k = self.hook_k( - einsum( - f"{kv_einops_string}, kv_head_index d_model d_head \ - -> batch pos kv_head_index d_head", - key_input, - self._W_K, - ) - + self._b_K + attn_fn(key_input, self._W_K, self._b_K) ) # [batch, pos, head_index, d_head] v = self.hook_v( - einsum( - f"{kv_einops_string}, kv_head_index d_model d_head \ - -> batch pos kv_head_index d_head", - value_input, - self._W_V, - ) - + self._b_V + attn_fn(value_input, self._W_V, self._b_V) ) # [batch, pos, head_index, d_head] return q, k, v diff --git a/transformer_lens/components/mlp.py b/transformer_lens/components/mlp.py deleted file mode 100644 index 6e5e942..0000000 --- a/transformer_lens/components/mlp.py +++ /dev/null @@ -1,81 +0,0 @@ -"""Hooked Transformer MLP Component. - -This module contains all the component :class:`MLP`. -""" -from typing import Callable, Dict, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fancy_einsum import einsum -from jaxtyping import Float - -from transformer_lens.components import LayerNorm, LayerNormPre -from transformer_lens.hook_points import HookPoint -from transformer_lens.HookedTransformerConfig import HookedTransformerConfig -from transformer_lens.utils import gelu_fast, gelu_new, solu - - -# MLP Layers -class MLP(nn.Module): - act_fn: Callable[..., torch.Tensor] - ln: nn.Module - - def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): - super().__init__() - self.cfg = HookedTransformerConfig.unwrap(cfg) - assert self.cfg.d_mlp is not None # TODO: should this not be optional? - self.W_in = nn.Parameter( - torch.empty(self.cfg.d_model, self.cfg.d_mlp, dtype=self.cfg.dtype) - ) - self.b_in = nn.Parameter(torch.zeros(self.cfg.d_mlp, dtype=self.cfg.dtype)) - self.W_out = nn.Parameter( - torch.empty(self.cfg.d_mlp, self.cfg.d_model, dtype=self.cfg.dtype) - ) - self.b_out = nn.Parameter(torch.zeros(self.cfg.d_model, dtype=self.cfg.dtype)) - - self.hook_pre = HookPoint() # [batch, pos, d_mlp] - self.hook_post = HookPoint() # [batch, pos, d_mlp] - - if self.cfg.act_fn == "relu": - self.act_fn = F.relu - elif self.cfg.act_fn == "gelu": - self.act_fn = F.gelu - elif self.cfg.act_fn == "silu": - self.act_fn = F.silu - elif self.cfg.act_fn == "gelu_new": - self.act_fn = gelu_new - elif self.cfg.act_fn == "gelu_fast": - self.act_fn = gelu_fast - elif self.cfg.act_fn == "solu_ln": - self.act_fn = solu - # Hook taken between activation and layer norm - self.hook_mid = HookPoint() # [batch, pos, d_mlp] - if self.cfg.normalization_type == "LN": - self.ln = LayerNorm(self.cfg, self.cfg.d_mlp) - else: - self.ln = LayerNormPre(self.cfg) - - else: - raise ValueError(f"Invalid activation function name: {self.cfg.act_fn}") - - def forward( - self, x: Float[torch.Tensor, "batch pos d_model"] - ) -> Float[torch.Tensor, "batch pos d_model"]: - # Technically, all these einsums could be done with a single matmul, but this is more readable. 
- pre_act = self.hook_pre( - einsum("batch pos d_model, d_model d_mlp -> batch pos d_mlp", x, self.W_in) + self.b_in - ) # [batch, pos, d_mlp] - if self.cfg.act_fn is not None and not self.cfg.act_fn.endswith("_ln"): - post_act = self.hook_post(self.act_fn(pre_act)) # [batch, pos, d_mlp] - else: - mid_act = self.hook_mid(self.act_fn(pre_act)) # [batch, pos, d_mlp] - post_act = self.hook_post(self.ln(mid_act)) - return ( - einsum( - "batch pos d_mlp, d_mlp d_model -> batch pos d_model", - post_act, - self.W_out, - ) - + self.b_out - ) diff --git a/transformer_lens/components/mlps/can_be_used_as_mlp.py b/transformer_lens/components/mlps/can_be_used_as_mlp.py new file mode 100644 index 0000000..b094527 --- /dev/null +++ b/transformer_lens/components/mlps/can_be_used_as_mlp.py @@ -0,0 +1,75 @@ +"""Can Be Used as MLP component. + +This module serves as the base for everything within TransformerLens that can be used like an MLP. +This does not necessarily mean that every component extending this class will be an MLP, but +everything extending this class can be used interchangeably for an MLP. +""" +from typing import Dict, Optional, Union + +import torch +import torch.nn as nn +from jaxtyping import Float + +from transformer_lens.components import LayerNorm, LayerNormPre +from transformer_lens.factories.activation_function_factory import ( + ActivationFunctionFactory, +) +from transformer_lens.hook_points import HookPoint +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.utilities.activation_functions import ActivationFunction + + +class CanBeUsedAsMLP(nn.Module): + # The actual activation function + act_fn: ActivationFunction + + # The full config object for the model + cfg: HookedTransformerConfig + + # The d mlp value pulled out of the config to make sure it always has a value + d_mlp: int + + # The middle hook point will be None unless it specifically should be used + hook_mid: Optional[HookPoint] # [batch, pos, d_mlp] + + # The layer norm component if the activation function is a layer norm + ln: Optional[nn.Module] + + def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): + """The base init for all MLP like components + + Args: + config (Union[Dict, HookedTransformerConfig]): The config for this instance + + Raises: + ValueError: If there is a misconfiguration + """ + super().__init__() + self.cfg = HookedTransformerConfig.unwrap(cfg) + if self.cfg.d_mlp is None: + raise ValueError("d_mlp must be set to use an MLP") + + self.d_mlp = self.cfg.d_mlp + + def forward( + self, x: Float[torch.Tensor, "batch pos d_model"] + ) -> Float[torch.Tensor, "batch pos d_model"]: + """The format for all forward functions for any MLP""" + return x + + def select_activation_function(self) -> None: + """This function should be called by all components in their init to get everything needed + for activation functions setup. + + Raises: + ValueError: If the configure activation function is not supported. 
+ """ + + self.act_fn = ActivationFunctionFactory.pick_activation_function(self.cfg) + + if self.cfg.is_layer_norm_activation(): + self.hook_mid = HookPoint() + if self.cfg.normalization_type == "LN": + self.ln = LayerNorm(self.cfg, self.d_mlp) + else: + self.ln = LayerNormPre(self.cfg) diff --git a/transformer_lens/components/mlps/gated_mlp.py b/transformer_lens/components/mlps/gated_mlp.py new file mode 100644 index 0000000..438e9cd --- /dev/null +++ b/transformer_lens/components/mlps/gated_mlp.py @@ -0,0 +1,73 @@ +"""Hooked Transformer Gated MLP Component. + +This module contains all the component :class:`GatedMLP`. +""" +from typing import Dict, Union + +import torch +import torch.nn as nn +from jaxtyping import Float +from transformers.utils import is_bitsandbytes_available + +from transformer_lens.components.mlps.can_be_used_as_mlp import CanBeUsedAsMLP +from transformer_lens.hook_points import HookPoint +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.utilities.addmm import batch_addmm + +if is_bitsandbytes_available(): + pass + + +class GatedMLP(CanBeUsedAsMLP): + """ + The equation of a gated MLP: + pre = x @ W_gate + pre_linear = x @ W_in + post = Gelu(pre) * (pre_linear) + b_in + mlp_out = post @ W_out + b_out + + In one equation, mlp_out = (Gelu(x @ W_gate) * (x @ W_in) + b_in) @ W_out + b_out + """ + + def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): + super().__init__(cfg) + self.select_activation_function() + self.W_in = nn.Parameter(torch.empty(self.cfg.d_model, self.d_mlp, dtype=self.cfg.dtype)) + self.W_out = nn.Parameter(torch.empty(self.d_mlp, self.cfg.d_model, dtype=self.cfg.dtype)) + self.W_gate = nn.Parameter(torch.empty(self.cfg.d_model, self.d_mlp, dtype=self.cfg.dtype)) + + self.b_in = nn.Parameter(torch.zeros(self.d_mlp, dtype=self.cfg.dtype)) + self.b_out = nn.Parameter(torch.zeros(self.cfg.d_model, dtype=self.cfg.dtype)) + + # hook on gate output but before act_fn + self.hook_pre = HookPoint() # [batch, pos, d_mlp] + # hook on the linear component of the input + self.hook_pre_linear = HookPoint() # [batch, pos, d_mlp] + # hook on act_fn(gate_output) * W_in(x) + b_in + self.hook_post = HookPoint() # [batch, pos, d_mlp] + + def forward( + self, x: Float[torch.Tensor, "batch pos d_model"] + ) -> Float[torch.Tensor, "batch pos d_model"]: + # Technically, all these einsums could be done with a single matmul, but this is more readable. + pre_act = self.hook_pre( + torch.matmul(x, self.W_gate) # batch pos d_model, d_model d_mlp -> batch pos d_mlp + ) # [batch, pos, d_mlp] + + if ( + self.cfg.is_layer_norm_activation() + and self.hook_mid is not None + and self.ln is not None + ): + mid_act = self.hook_mid(self.act_fn(pre_act)) # [batch, pos, d_mlp] + post_act = self.hook_post(self.ln(mid_act)) + else: + pre_linear = self.hook_pre_linear( + torch.matmul(x, self.W_in) # batch pos d_model, d_model d_mlp -> batch pos d_mlp + ) + + post_act = self.hook_post( + (self.act_fn(pre_act) * pre_linear) + self.b_in + ) # [batch, pos, d_mlp] + + return batch_addmm(self.b_out, self.W_out, post_act) diff --git a/transformer_lens/components/mlps/gated_mlp_4bit.py b/transformer_lens/components/mlps/gated_mlp_4bit.py new file mode 100644 index 0000000..708a7d1 --- /dev/null +++ b/transformer_lens/components/mlps/gated_mlp_4bit.py @@ -0,0 +1,77 @@ +"""Hooked Transformer Gated MLP Component. + +This module contains all the component :class:`GatedMLP`. 
+""" +from typing import Dict, Union + +import torch +import torch.nn as nn +from jaxtyping import Float +from transformers.utils import is_bitsandbytes_available + +from transformer_lens.components.mlps.can_be_used_as_mlp import CanBeUsedAsMLP +from transformer_lens.hook_points import HookPoint +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + +if is_bitsandbytes_available(): + import bitsandbytes as bnb + from bitsandbytes.nn.modules import Params4bit + + +class GatedMLP4Bit(CanBeUsedAsMLP): + """ + The equation of a gated MLP: + pre = x @ W_gate + pre_linear = x @ W_in + post = Gelu(pre) * (pre_linear) + b_in + mlp_out = post @ W_out + b_out + + In one equation, mlp_out = (Gelu(x @ W_gate) * (x @ W_in) + b_in) @ W_out + b_out + """ + + def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): + super().__init__(cfg) + self.select_activation_function() + + nq = int((self.cfg.d_model * self.d_mlp) / 2) + self.W_in = Params4bit(torch.empty(nq, 1, dtype=torch.uint8), requires_grad=False) + self.W_gate = Params4bit(torch.empty(nq, 1, dtype=torch.uint8), requires_grad=False) + self.W_out = Params4bit(torch.empty(nq, 1, dtype=torch.uint8), requires_grad=False) + + self.b_in = nn.Parameter(torch.zeros(self.d_mlp, dtype=self.cfg.dtype)) + self.b_out = nn.Parameter(torch.zeros(self.cfg.d_model, dtype=self.cfg.dtype)) + + # hook on gate output but before act_fn + self.hook_pre = HookPoint() # [batch, pos, d_mlp] + # hook on the linear component of the input + self.hook_pre_linear = HookPoint() # [batch, pos, d_mlp] + # hook on act_fn(gate_output) * W_in(x) + b_in + self.hook_post = HookPoint() # [batch, pos, d_mlp] + + def forward( + self, x: Float[torch.Tensor, "batch pos d_model"] + ) -> Float[torch.Tensor, "batch pos d_model"]: + # Technically, all these einsums could be done with a single matmul, but this is more readable. + pre_act = self.hook_pre( + bnb.matmul_4bit(x, self.W_gate.t(), bias=None, quant_state=self.W_gate.quant_state) + ) + + if ( + self.cfg.is_layer_norm_activation() + and self.hook_mid is not None + and self.ln is not None + ): + mid_act = self.hook_mid(self.act_fn(pre_act)) # [batch, pos, d_mlp] + post_act = self.hook_post(self.ln(mid_act)) + else: + pre_linear = self.hook_pre_linear( + bnb.matmul_4bit(x, self.W_in.t(), bias=None, quant_state=self.W_in.quant_state) + ) + + post_act = self.hook_post( + (self.act_fn(pre_act) * pre_linear) + self.b_in + ) # [batch, pos, d_mlp] + + return bnb.matmul_4bit( + post_act, self.W_out.t(), bias=None, quant_state=self.W_out.quant_state + ) diff --git a/transformer_lens/components/mlps/mlp.py b/transformer_lens/components/mlps/mlp.py new file mode 100644 index 0000000..2da3484 --- /dev/null +++ b/transformer_lens/components/mlps/mlp.py @@ -0,0 +1,49 @@ +"""Hooked Transformer MLP Component. + +This module contains all the component :class:`MLP`. 
+""" + +from typing import Dict, Union + +import torch +import torch.nn as nn +from jaxtyping import Float + +from transformer_lens.components.mlps.can_be_used_as_mlp import CanBeUsedAsMLP +from transformer_lens.hook_points import HookPoint +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.utilities.addmm import batch_addmm + + +class MLP(CanBeUsedAsMLP): + def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): + super().__init__(cfg) + self.select_activation_function() + + self.W_in = nn.Parameter(torch.empty(self.cfg.d_model, self.d_mlp, dtype=self.cfg.dtype)) + self.b_in = nn.Parameter(torch.zeros(self.d_mlp, dtype=self.cfg.dtype)) + + self.W_out = nn.Parameter(torch.empty(self.d_mlp, self.cfg.d_model, dtype=self.cfg.dtype)) + self.b_out = nn.Parameter(torch.zeros(self.cfg.d_model, dtype=self.cfg.dtype)) + + self.hook_pre = HookPoint() # [batch, pos, d_mlp] + self.hook_post = HookPoint() # [batch, pos, d_mlp] + + def forward( + self, x: Float[torch.Tensor, "batch pos d_model"] + ) -> Float[torch.Tensor, "batch pos d_model"]: + # This is equivalent to (roughly) W_in @ x + b_in. It's important to + # use a fused addmm to ensure it matches the Huggingface implementation + # exactly. + pre_act = self.hook_pre(batch_addmm(self.b_in, self.W_in, x)) # [batch, pos, d_mlp] + + if ( + self.cfg.is_layer_norm_activation() + and self.hook_mid is not None + and self.ln is not None + ): + mid_act = self.hook_mid(self.act_fn(pre_act)) # [batch, pos, d_mlp] + post_act = self.hook_post(self.ln(mid_act)) + else: + post_act = self.hook_post(self.act_fn(pre_act)) # [batch, pos, d_mlp] + return batch_addmm(self.b_out, self.W_out, post_act) diff --git a/transformer_lens/components/mlps/moe.py b/transformer_lens/components/mlps/moe.py new file mode 100644 index 0000000..e01f25e --- /dev/null +++ b/transformer_lens/components/mlps/moe.py @@ -0,0 +1,113 @@ +from typing import Dict, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from jaxtyping import Float + +from transformer_lens.components.mlps.can_be_used_as_mlp import CanBeUsedAsMLP +from transformer_lens.factories.activation_function_factory import ( + ActivationFunctionFactory, +) +from transformer_lens.hook_points import HookPoint +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +class MoEGatedMLP(nn.Module): + """MoEGated MLP + + This MLP matches the implementation for Mixtral on HuggingFace. It is meant to stay within our + MoE, since the format of this MLP is different from the standard MLPs throughout + TransformerLens. + + It may be possible to rework this to follow the same interface as other MLPs, but for the + time being it is being left as is to ensure accuracy. 
+ """ + + def __init__(self, cfg: HookedTransformerConfig): + super().__init__() + self.cfg = cfg + + self.d_mlp = self.cfg.d_mlp + + if self.d_mlp is None: + raise ValueError("d_mlp must be set to use an MLP") + + self.W_in = nn.Linear(self.cfg.d_model, self.d_mlp, bias=False) + self.W_out = nn.Linear(self.d_mlp, self.cfg.d_model, bias=False) + self.W_gate = nn.Linear(self.cfg.d_model, self.d_mlp, bias=False) + + # hook on gate output but before act_fn + self.hook_gate = HookPoint() # [batch, pos, d_mlp] + # hook on the linear component of the input + self.hook_pre = HookPoint() # [batch, pos, d_mlp] + # hook on act_fn(gate_output) * W_in(x) + b_in + self.hook_post = HookPoint() # [batch, pos, d_mlp] + + self.act_fn = ActivationFunctionFactory.pick_activation_function(self.cfg) + + def forward(self, x: Float[torch.Tensor, "pos d_model"]) -> Float[torch.Tensor, "pos d_model"]: + gated_x = self.hook_gate(self.W_gate(x)) + pre_act = self.hook_pre(self.W_in(x)) + post_act = self.hook_post(self.act_fn(gated_x) * pre_act) + return self.W_out(post_act) + + +class MoE(CanBeUsedAsMLP): + def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): + super().__init__(cfg) + + # Ensure that num_experts and experts_per_token are specified and non-zero + assert self.cfg.num_experts is not None, "num_experts must be specified for MoE layer" + assert self.cfg.experts_per_token, "experts_per_token must be specified for MoE layer" + + self.num_experts: int = self.cfg.num_experts + self.experts_per_token: int = self.cfg.experts_per_token + + assert ( + self.cfg.experts_per_token <= self.cfg.num_experts + ), "experts_per_token must be less than or equal to num_experts" + + self.experts = nn.ModuleList([MoEGatedMLP(self.cfg) for _ in range(self.num_experts)]) + self.W_gate = nn.Linear(self.cfg.d_model, self.cfg.num_experts, bias=False) + + # Hook on the weights of selected experts [batch pos experts_per_token] + self.hook_expert_weights = HookPoint() + # Hook on the indices of selected experts [batch pos experts_per_token] + self.hook_expert_indices = HookPoint() + + def forward( + self, x: Float[torch.Tensor, "batch pos d_model"] + ) -> Float[torch.Tensor, "batch pos d_model"]: + # [batch, pos, d_model] -> [batch, pos, num_experts] + batch, pos, d_model = x.shape + x = x.view(-1, d_model) + gate_logits = self.W_gate(x) + + # choose the top k(=experts_per_token) experts to use + # both are [batch, pos, experts_per_token] + weights = self.hook_expert_weights(F.softmax(gate_logits, dim=1, dtype=torch.float)) + weights, expert_indices = torch.topk(weights, self.experts_per_token, dim=-1) + weights /= weights.sum(dim=-1, keepdim=True) + expert_indices = self.hook_expert_indices(expert_indices) + weights = weights.to(x.dtype) + + results = torch.zeros((batch * pos, d_model), dtype=x.dtype, device=x.device) + expert_mask = F.one_hot(expert_indices, num_classes=self.num_experts).permute(2, 1, 0) + for expert_idx in range(self.num_experts): + expert_layer = self.experts[expert_idx] + idx, top_x = torch.where(expert_mask[expert_idx]) + + # Index the correct hidden states and compute the expert hidden state for + # the current expert. We need to make sure to multiply the output hidden + # states by `routing_weights` on the corresponding tokens (top-1 and top-2) + current_state = x[None, top_x].reshape(-1, d_model) + + current_hidden_states = expert_layer(current_state) * weights[top_x, idx, None] + + # However `index_add_` only support torch tensors for indexing so we'll use + # the `top_x` tensor here. 
+ results.index_add_(0, top_x, current_hidden_states.to(x.dtype)) + + results = results.reshape(batch, pos, d_model) + return results diff --git a/transformer_lens/components/moe.py b/transformer_lens/components/moe.py deleted file mode 100644 index 895ee74..0000000 --- a/transformer_lens/components/moe.py +++ /dev/null @@ -1,65 +0,0 @@ -from typing import Dict, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fancy_einsum import einsum -from jaxtyping import Float - -from transformer_lens.components import MLP, GatedMLP -from transformer_lens.hook_points import HookPoint -from transformer_lens.HookedTransformerConfig import HookedTransformerConfig - - -class MoE(nn.Module): - def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): - super().__init__() - self.cfg = HookedTransformerConfig.unwrap(cfg) - - # Ensure that num_experts and experts_per_token are specified and non-zero - assert self.cfg.num_experts is not None, "num_experts must be specified for MoE layer" - assert self.cfg.experts_per_token, "experts_per_token must be specified for MoE layer" - self.experts_per_token: int = self.cfg.experts_per_token - assert ( - self.cfg.experts_per_token <= self.cfg.num_experts - ), "experts_per_token must be less than or equal to num_experts" - - self.experts = nn.ModuleList( - [ - GatedMLP(self.cfg) if self.cfg.gated_mlp else MLP(self.cfg) - for _ in range(self.cfg.num_experts) - ] - ) - self.W_gate = nn.Parameter( - torch.empty(self.cfg.d_model, self.cfg.num_experts, dtype=self.cfg.dtype) - ) - - # Hook on the weights of selected experts [batch pos experts_per_token] - self.hook_expert_weights = HookPoint() - # Hook on the indices of selected experts [batch pos experts_per_token] - self.hook_expert_indices = HookPoint() - - def forward( - self, x: Float[torch.Tensor, "batch pos d_model"] - ) -> Float[torch.Tensor, "batch pos d_model"]: - # [batch, pos, d_model] -> [batch, pos, num_experts] - gate_logits = einsum( - "batch pos d_model, d_model num_experts -> batch pos num_experts", - x, - self.W_gate, - ) - - # choose the top k(=experts_per_token) experts to use - # both are [batch, pos, experts_per_token] - weights, expert_indices = torch.topk(gate_logits, self.experts_per_token) - weights = self.hook_expert_weights(F.softmax(weights, dim=-1)) - expert_indices = self.hook_expert_indices(expert_indices) - - results = torch.zeros_like(x) - for i, expert_mlp in enumerate(self.experts): - # find the batch, pos, and expert indices which use this expert - batch, pos, expert = torch.where(expert_indices == i) - # accumulate the weighted outputs from the expert - results[batch] += weights[batch, pos, expert, None, None] * expert_mlp(x[batch]) - - return results diff --git a/transformer_lens/components/t5_attention.py b/transformer_lens/components/t5_attention.py new file mode 100644 index 0000000..ef74b09 --- /dev/null +++ b/transformer_lens/components/t5_attention.py @@ -0,0 +1,140 @@ +import math +from typing import Dict, Optional, Union + +import torch +import torch.nn as nn +from jaxtyping import Float, Int + +from transformer_lens.components.abstract_attention import AbstractAttention +from transformer_lens.hook_points import HookPoint +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +class T5Attention(AbstractAttention): + r""" + T5 attention - with relative attention bias and cross-attention support + This realisation expects you to precompute relative positional bias, and then feed it to forward + like + ```python + 
attn = T5Attention(cfg, has_relative_attention_bias=True) + positional_bias = attn.compute_relative_attention_bias(query_len, key_len, device=device) + result = attn(query, key, value, position_bias=positional_bias) + ``` + """ + + def __init__( + self, + cfg: Union[Dict, HookedTransformerConfig], + has_relative_attention_bias: bool = False, + attn_type: str = "global", + layer_id: Optional[int] = None, + ): + super().__init__(cfg, attn_type, layer_id) + if isinstance(cfg, Dict): + cfg = HookedTransformerConfig.from_dict(cfg) + self.cfg = cfg + self.has_relative_attention_bias: bool = has_relative_attention_bias + + if self.has_relative_attention_bias: + if ( + cfg.relative_attention_num_buckets is None + or cfg.relative_attention_max_distance is None + ): + raise ValueError( + "You need to specify relative_attention_num_buckets and relative_attention_max_distance in config to use relative attention bias" + ) + + self.relative_attention_num_buckets = cfg.relative_attention_num_buckets + self.relative_attention_max_distance = cfg.relative_attention_max_distance + self.rel_pos_bias = nn.Embedding(self.relative_attention_num_buckets, self.cfg.n_heads) + self.rel_pos_hook = HookPoint() + + self.W_K = nn.Parameter( + torch.empty(self.cfg.n_heads, self.cfg.d_model, self.cfg.d_head, dtype=cfg.dtype) + ) + self.W_V = nn.Parameter( + torch.empty(self.cfg.n_heads, self.cfg.d_model, self.cfg.d_head, dtype=cfg.dtype) + ) + self.b_K = nn.Parameter(torch.zeros(self.cfg.n_heads, self.cfg.d_head, dtype=cfg.dtype)) + self.b_V = nn.Parameter(torch.zeros(self.cfg.n_heads, self.cfg.d_head, dtype=cfg.dtype)) + + @staticmethod + def _relative_position_bucket( + relative_position: Int[torch.Tensor, "query_pos kv_pos"], + bidirectional=True, + num_buckets=32, + max_distance=128, + ) -> Int[torch.Tensor, "query_pos kv_pos"]: + """ + added from + https://github.com/huggingface/transformers/blob/e0c3cee17085914bbe505c159beeb8ae39bc37dd/src/transformers/models/t5/modeling_t5.py#L382 + which is adapted from + https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 + + + Translate relative position to a bucket number for relative attention. The relative position is defined as + memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to + position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for + small absolute relative_position and larger buckets for larger absolute relative_positions. All relative + positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
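For a concrete feel for the bucketing just described, the sketch below builds the relative-position grid for a short sequence and maps it to buckets with the static method defined in this class. It assumes this patched TransformerLens is importable and adds nothing beyond what `compute_relative_attention_bias` already does internally.

```python
import torch
from transformer_lens.components.t5_attention import T5Attention

query_len = key_len = 6
context_position = torch.arange(query_len)[:, None]      # [query_pos, 1]
memory_position = torch.arange(key_len)[None, :]         # [1, kv_pos]
relative_position = memory_position - context_position   # [query_pos, kv_pos]

buckets = T5Attention._relative_position_bucket(
    relative_position, bidirectional=True, num_buckets=32, max_distance=128
)
print(buckets)  # small offsets get exact buckets; offsets beyond max_distance share the last one
# rel_pos_bias then embeds each bucket id into one bias value per head.
```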
+ This should allow for more graceful generalization to longer sequences than the model has been trained on + + Args: + relative_position: an int32 Tensor + bidirectional: a boolean - whether the attention is bidirectional + num_buckets: an integer + max_distance: an integer + + Returns: + a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) + """ + relative_buckets = torch.zeros_like(relative_position) + + if bidirectional: + num_buckets //= 2 + relative_buckets += (relative_position > 0).to(torch.long) * num_buckets + relative_position = torch.abs(relative_position) + else: + relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) + # now relative_position is in the range [0, inf) + + # half of the buckets are for exact increments in positions + max_exact = num_buckets // 2 + is_small = relative_position < max_exact + + # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance + relative_position_if_large = max_exact + ( + torch.log(relative_position.float() / max_exact) + / math.log(max_distance / max_exact) + * (num_buckets - max_exact) + ).to(torch.long) + relative_position_if_large = torch.min( + relative_position_if_large, + torch.full_like(relative_position_if_large, num_buckets - 1), + ) + + relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) + return relative_buckets + + def compute_relative_attention_bias( + self, query_length: int, key_length: int, device=None + ) -> Float[torch.Tensor, "1 head_index pos kv_pos"]: + """Compute binned relative position bias""" + if device is None: + device = self.rel_pos_bias.weight.device + context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] + memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] + relative_position = memory_position - context_position # shape (query_length, key_length) + relative_position_bucket = self._relative_position_bucket( + relative_position, # shape (query_length, key_length) + bidirectional=True, + num_buckets=self.relative_attention_num_buckets, + max_distance=self.relative_attention_max_distance, + ) + values = self.rel_pos_bias( + relative_position_bucket + ) # shape (query_length, key_length, num_heads) + values = values.permute([2, 0, 1]).unsqueeze( + 0 + ) # shape (1, num_heads, query_length, key_length) + return values diff --git a/transformer_lens/components/t5_block.py b/transformer_lens/components/t5_block.py new file mode 100644 index 0000000..5a5adfd --- /dev/null +++ b/transformer_lens/components/t5_block.py @@ -0,0 +1,156 @@ +from typing import Optional + +import torch +import torch.nn as nn +from jaxtyping import Float + +from transformer_lens.components import RMSNorm, T5Attention +from transformer_lens.factories.mlp_factory import MLPFactory +from transformer_lens.hook_points import HookPoint +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.past_key_value_caching import HookedTransformerKeyValueCacheEntry +from transformer_lens.utils import repeat_along_head_dimension + + +class T5Block(nn.Module): + """ + T5 decoder Block. Uses T5Layernorm, and T5attention insted of usual ones. + Also uses cross attention if is_decoder is True. 
+ """ + + def __init__(self, cfg: HookedTransformerConfig, block_index: int, is_decoder: bool): + super().__init__() + self.cfg = cfg + self.is_decoder = is_decoder + + self.ln1 = RMSNorm(cfg) + self.attn = T5Attention(cfg, has_relative_attention_bias=block_index == 0) + self.ln2 = RMSNorm(cfg) + if self.is_decoder: + self.cross_attn = T5Attention(cfg) + self.ln3 = RMSNorm(cfg) + self.mlp = MLPFactory.create_mlp(self.cfg) # [batch, pos, n_heads] + + self.hook_q_input = HookPoint() # [batch, pos, n_heads, d_model] + self.hook_k_input = HookPoint() # [batch, pos, n_heads, d_model] + self.hook_v_input = HookPoint() # [batch, pos, n_heads, d_model] + + self.hook_attn_in = HookPoint() # [batch, pos, d_model] + self.hook_attn_out = HookPoint() # [batch, pos, d_model] + if self.is_decoder: + self.hook_cross_attn_in = HookPoint() # [batch, pos, d_model] + self.hook_cross_attn_out = HookPoint() # [batch, pos, d_model] + self.hook_resid_mid_cross = HookPoint() # [batch, pos, d_model] + + self.hook_mlp_in = HookPoint() # [batch, pos, d_model] + self.hook_mlp_out = HookPoint() # [batch, pos, d_model] + self.hook_resid_pre = HookPoint() # [batch, pos, d_model] + self.hook_resid_mid = HookPoint() # [batch, pos, d_model] + self.hook_resid_post = HookPoint() # [batch, pos, d_model] + + def forward( + self, + resid_pre: Float[torch.Tensor, "batch pos d_model"], + additive_attention_mask: Optional[Float[torch.Tensor, "batch 1 1 pos"]] = None, + encoder_additive_attention_mask: Optional[ + Float[torch.Tensor, "batch 1 1 encoder_pos"] + ] = None, + position_bias: Optional[Float[torch.Tensor, "1 head_index pos kv_pos"]] = None, + encoder_hidden_states: Optional[Float[torch.Tensor, "batch encoder_pos d_model"]] = None, + past_kv_cache_entry: Optional[HookedTransformerKeyValueCacheEntry] = None, + ) -> Float[torch.Tensor, "batch pos d_model"]: + """A single Transformer block. + + Args: + resid_pre (torch.Tensor): The residual stream - shape [batch, pos, d_model] + encoder_hidden_states (torch.Tensor): The hidden states of the encoder for cross attention - shape [batch, encoder_pos, d_model] + cache (HookedTransformerKeyValueCache): A cache of previous keys and values, used only when generating text. Defaults to None. + attention_mask (torch.Tensor, optional): The attention mask for padded tokens. Defaults to None. + + Returns: + _type_: _description_ + """ + resid_pre = self.hook_resid_pre(resid_pre) # [batch, pos, d_model] + + attn_in = resid_pre + + if self.cfg.use_attn_in: + attn_in = self.hook_attn_in( + repeat_along_head_dimension(resid_pre, n_heads=self.cfg.n_heads) + ) + + if self.cfg.use_split_qkv_input: + n_kv_heads = ( + self.cfg.n_key_value_heads + if self.cfg.n_key_value_heads is not None + else self.cfg.n_heads + ) + query_input = self.hook_q_input( + repeat_along_head_dimension(resid_pre, n_heads=self.cfg.n_heads) + ) + key_input = self.hook_k_input( + repeat_along_head_dimension(resid_pre, n_heads=n_kv_heads) + ) + value_input = self.hook_v_input( + repeat_along_head_dimension(resid_pre, n_heads=n_kv_heads) + ) + else: + query_input = attn_in + key_input = attn_in + value_input = attn_in + + attn_out = self.hook_attn_out( + # hook the residual stream states that are used to calculate the + # queries, keys and values, independently. + # Then take the layer norm of these inputs, and pass these to the attention module. 
+ self.attn( + query_input=self.ln1(query_input), + key_input=self.ln1(key_input), + value_input=self.ln1(value_input), + past_kv_cache_entry=past_kv_cache_entry, + additive_attention_mask=additive_attention_mask, + position_bias=position_bias, + ) + ) + + # [batch, pos, d_model] + + resid_mid = self.hook_resid_mid(resid_pre + attn_out) # [batch, pos, d_model] + + if self.is_decoder: + cross_attn_in = ( + resid_mid + if not self.cfg.use_attn_in + else self.hook_cross_attn_in(resid_mid.clone()) + ) + + if encoder_hidden_states is None: + raise ValueError("Encoder hidden states must be provided for cross attention!") + + cross_attn_out = self.hook_cross_attn_out( + self.cross_attn( + query_input=self.ln2(cross_attn_in), + key_input=encoder_hidden_states, + value_input=encoder_hidden_states, + additive_attention_mask=encoder_additive_attention_mask, + ) + ) + resid_mid_cross = self.hook_resid_mid_cross(resid_mid + cross_attn_out) + + mlp_in = ( + resid_mid_cross + if not self.cfg.use_hook_mlp_in + else self.hook_mlp_in(resid_mid_cross.clone()) + ) + + normalized_resid_mid = self.ln3(mlp_in) + else: + mlp_in = ( + resid_mid if not self.cfg.use_hook_mlp_in else self.hook_mlp_in(resid_mid.clone()) + ) + normalized_resid_mid = self.ln2(mlp_in) + + mlp_out = self.hook_mlp_out(self.mlp(normalized_resid_mid)) # [batch, pos, d_model] + resid_post = self.hook_resid_post(mlp_in + mlp_out) # [batch, pos, d_model] + + return resid_post diff --git a/transformer_lens/components/transformer_block.py b/transformer_lens/components/transformer_block.py index 67ade64..6db16a1 100644 --- a/transformer_lens/components/transformer_block.py +++ b/transformer_lens/components/transformer_block.py @@ -2,24 +2,23 @@ This module contains all the component :class:`TransformerBlock`. 
""" -import logging -from typing import Dict, Optional, Union + +from typing import Callable, Dict, Optional, Union import torch import torch.nn as nn from jaxtyping import Float, Int from transformer_lens.components import ( - MLP, Attention, - GatedMLP, GroupedQueryAttention, LayerNorm, LayerNormPre, - MoE, RMSNorm, RMSNormPre, ) +from transformer_lens.components.mlps.can_be_used_as_mlp import CanBeUsedAsMLP +from transformer_lens.factories.mlp_factory import MLPFactory from transformer_lens.hook_points import HookPoint from transformer_lens.HookedTransformerConfig import HookedTransformerConfig from transformer_lens.past_key_value_caching import HookedTransformerKeyValueCacheEntry @@ -30,49 +29,60 @@ class TransformerBlock(nn.Module): ln1: nn.Module ln2: nn.Module - mlp: nn.Module + mlp: CanBeUsedAsMLP def __init__(self, cfg: Union[Dict, HookedTransformerConfig], block_index): super().__init__() self.cfg = HookedTransformerConfig.unwrap(cfg) - if self.cfg.normalization_type == "LN": - self.ln1 = LayerNorm(cfg) - if not self.cfg.attn_only: - self.ln2 = LayerNorm(cfg) - elif self.cfg.normalization_type == "LNPre": + normalization_layer: Callable # type: ignore + normalization_layer_after: Callable # type: ignore + + self.normalization_type = self.cfg.normalization_type + + if self.normalization_type == "LN": + normalization_layer = LayerNorm + elif self.normalization_type == "LNPre": # We've folded in LayerNorm weights, so just need the center + scale parts - self.ln1 = LayerNormPre(cfg) - if not self.cfg.attn_only: - self.ln2 = LayerNormPre(cfg) - elif self.cfg.normalization_type == "RMS": - self.ln1 = RMSNorm(cfg) - if not self.cfg.attn_only: - self.ln2 = RMSNorm(cfg) - elif self.cfg.normalization_type == "RMSPre": - self.ln1 = RMSNormPre(cfg) - if not self.cfg.attn_only: - self.ln2 = RMSNormPre(cfg) - elif self.cfg.normalization_type is None: - self.ln1 = nn.Identity() - if not self.cfg.attn_only: - self.ln2 = nn.Identity() + normalization_layer = LayerNormPre + elif self.normalization_type == "RMS": + normalization_layer = RMSNorm + elif self.normalization_type == "RMSPre": + normalization_layer = RMSNormPre + elif self.normalization_type is None: + # This should just be the identity. + # We need to make this a lambda so we can call it on the config, just like the others + normalization_layer = lambda cfg: nn.Identity() else: - logging.warning(f"Invalid normalization_type passed in {self.cfg.normalization_type}") + raise ValueError(f"Invalid normalization_type passed in: {self.normalization_type}") + + if self.cfg.use_normalization_before_and_after: + # If we use LN before and after, we do *not* fold in the weights to the LN + # after, though we can fold for the one before. 
+ if self.normalization_type is None: + normalization_layer_after = lambda cfg: nn.Identity() + elif self.normalization_type.startswith("RMS"): + normalization_layer_after = RMSNorm + elif self.normalization_type.startswith("LayerNorm"): + normalization_layer_after = LayerNorm + + self.ln1 = normalization_layer(cfg) + if self.cfg.use_normalization_before_and_after: + self.ln1_post = normalization_layer_after(cfg) + if not self.cfg.attn_only: + self.ln2 = normalization_layer(cfg) + if self.cfg.use_normalization_before_and_after: + self.ln2_post = normalization_layer_after(cfg) attention = Attention if self.cfg.n_key_value_heads is None else GroupedQueryAttention if not self.cfg.use_local_attn: - self.attn = attention(cfg, "global", block_index) + self.attn = attention(self.cfg, "global", block_index) else: - assert self.cfg.attn_types is not None + if self.cfg.attn_types is None: + raise ValueError("attn_types must be set when using local attention") attn_type = self.cfg.attn_types[block_index] - self.attn = attention(cfg, attn_type, block_index) + self.attn = attention(self.cfg, attn_type, block_index) if not self.cfg.attn_only: - if self.cfg.num_experts: - self.mlp = MoE(cfg) - elif self.cfg.gated_mlp: - self.mlp = GatedMLP(cfg) - else: - self.mlp = MLP(cfg) + self.mlp = MLPFactory.create_mlp(self.cfg) self.hook_attn_in = HookPoint() # [batch, pos, n_heads, d_model] self.hook_q_input = HookPoint() # [batch, pos, n_heads, d_model] @@ -104,7 +114,7 @@ def forward( attention_mask (torch.Tensor, optional): The attention mask for padded tokens. Defaults to None. Returns: - _type_: _description_ + Float[torch.Tensor, "batch pos d_model"]: Our resulting tensor """ resid_pre = self.hook_resid_pre(resid_pre) # [batch, pos, d_model] @@ -142,7 +152,7 @@ def forward( key_input = attn_in value_input = attn_in - attn_out = self.hook_attn_out( + attn_out = ( # hook the residual stream states that are used to calculate the # queries, keys and values, independently. # Then take the layer norm of these inputs, and pass these to the attention module. @@ -156,13 +166,19 @@ def forward( attention_mask=attention_mask, ) ) # [batch, pos, d_model] + if self.cfg.use_normalization_before_and_after: + # If we use LayerNorm both before and after, then apply the second LN after the layer + # and before the hook. We do it before the hook so hook_attn_out captures "that which + # is added to the residual stream" + attn_out = self.ln1_post(attn_out) + attn_out = self.hook_attn_out(attn_out) if not self.cfg.attn_only and not self.cfg.parallel_attn_mlp: resid_mid = self.hook_resid_mid(resid_pre + attn_out) # [batch, pos, d_model] mlp_in = ( resid_mid if not self.cfg.use_hook_mlp_in else self.hook_mlp_in(resid_mid.clone()) ) normalized_resid_mid = self.ln2(mlp_in) - mlp_out = self.hook_mlp_out(self.mlp(normalized_resid_mid)) # [batch, pos, d_model] + mlp_out = self.apply_mlp(normalized_resid_mid) resid_post = self.hook_resid_post(resid_mid + mlp_out) # [batch, pos, d_model] elif self.cfg.parallel_attn_mlp: # Dumb thing done by GPT-J, both MLP and Attn read from resid_pre and write to resid_post, no resid_mid used. 
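The block above now defers MLP construction to `MLPFactory.create_mlp`; the factory itself is added later in this patch and picks `MoE`, `GatedMLP`/`GatedMLP4Bit`, or the plain `MLP` from the config flags. Below is a minimal sketch of that dispatch for an ordinary config; the exact set of `HookedTransformerConfig` fields needed to build a standalone config here is an assumption, not something this diff pins down.

```python
from transformer_lens.components.mlps.mlp import MLP
from transformer_lens.factories.mlp_factory import MLPFactory
from transformer_lens.HookedTransformerConfig import HookedTransformerConfig

# Assumed-minimal config: no num_experts and gated_mlp left False, so the
# factory should hand back the standard MLP component.
cfg = HookedTransformerConfig(
    n_layers=1, d_model=16, n_ctx=32, d_head=4, d_mlp=64, act_fn="gelu"
)
mlp = MLPFactory.create_mlp(cfg)
assert isinstance(mlp, MLP)
```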
@@ -170,10 +186,23 @@ def forward( normalized_resid_pre_2 = self.ln2( resid_pre if not self.cfg.use_hook_mlp_in else self.hook_mlp_in(resid_pre.clone()) ) - mlp_out = self.hook_mlp_out(self.mlp(normalized_resid_pre_2)) # [batch, pos, d_model] + mlp_out = self.apply_mlp(normalized_resid_pre_2) resid_post = self.hook_resid_post( resid_pre + attn_out + mlp_out ) # [batch, pos, d_model] else: resid_post = self.hook_resid_post(resid_pre + attn_out) # [batch, pos, d_model] return resid_post + + def apply_mlp( + self, normalized_resid: Float[torch.Tensor, "batch pos d_model"] + ) -> Float[torch.Tensor, "batch pos d_model"]: + """Centralized point where the MLP is applied to the forward pass + + Returns: + Float[torch.Tensor, "batch pos d_model"]: Our resulting tensor + """ + mlp_out = self.mlp(normalized_resid) # [batch, pos, d_model] + if self.cfg.use_normalization_before_and_after: + mlp_out = self.ln2_post(mlp_out) + return self.hook_mlp_out(mlp_out) diff --git a/transformer_lens/components/unembed.py b/transformer_lens/components/unembed.py index 938dbe9..538532f 100644 --- a/transformer_lens/components/unembed.py +++ b/transformer_lens/components/unembed.py @@ -2,14 +2,15 @@ This module contains all the component :class:`Unembed`. """ + from typing import Dict, Union import torch import torch.nn as nn -from fancy_einsum import einsum from jaxtyping import Float from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.utilities.addmm import batch_addmm class Unembed(nn.Module): @@ -27,11 +28,4 @@ def __init__(self, cfg: Union[Dict, HookedTransformerConfig]): def forward( self, residual: Float[torch.Tensor, "batch pos d_model"] ) -> Float[torch.Tensor, "batch pos d_vocab_out"]: - return ( - einsum( - "batch pos d_model, d_model vocab -> batch pos vocab", - residual, - self.W_U, - ) - + self.b_U - ) + return batch_addmm(self.b_U, self.W_U, residual) diff --git a/transformer_lens/factories/activation_function_factory.py b/transformer_lens/factories/activation_function_factory.py new file mode 100644 index 0000000..b640387 --- /dev/null +++ b/transformer_lens/factories/activation_function_factory.py @@ -0,0 +1,37 @@ +"""Activation Function Factory + +Centralized location for selection supported activation functions throughout TransformerLens +""" + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.utilities.activation_functions import ( + SUPPORTED_ACTIVATIONS, + ActivationFunction, +) + + +class ActivationFunctionFactory: + @staticmethod + def pick_activation_function(cfg: HookedTransformerConfig) -> ActivationFunction: + """Use this to select what activation function is needed based on configuration. + + Args: + cfg (HookedTransformerConfig): The already created hooked transformer config + + Raises: + ValueError: If there is a problem with the requested activation function. + + Returns: + ActivationFunction: The activation function based on the dictionary of supported activations. 
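A quick, hypothetical usage of the factory described here: resolve the configured activation by name and apply it elementwise. `"gelu"` is assumed to be among `SUPPORTED_ACTIVATIONS`, and the minimal config fields are the same assumption as in the MLP factory sketch above.

```python
import torch
from transformer_lens.factories.activation_function_factory import ActivationFunctionFactory
from transformer_lens.HookedTransformerConfig import HookedTransformerConfig

cfg = HookedTransformerConfig(n_layers=1, d_model=16, n_ctx=32, d_head=4, act_fn="gelu")
act_fn = ActivationFunctionFactory.pick_activation_function(cfg)
out = act_fn(torch.randn(2, 8, 64))  # activations act elementwise on [batch, pos, d_mlp]
```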
+ """ + act_fn = cfg.act_fn + + if act_fn is None: + raise ValueError("act_fn not set when trying to select Activation Function") + + activation_function = SUPPORTED_ACTIVATIONS.get(act_fn) + + if activation_function is None: + raise ValueError(f"Invalid activation function name: {act_fn}") + + return activation_function diff --git a/transformer_lens/factories/mlp_factory.py b/transformer_lens/factories/mlp_factory.py new file mode 100644 index 0000000..de873b0 --- /dev/null +++ b/transformer_lens/factories/mlp_factory.py @@ -0,0 +1,21 @@ +"""MLP Factory + +Centralized location for creating any MLP needed within TransformerLens +""" +from transformer_lens.components.mlps.can_be_used_as_mlp import CanBeUsedAsMLP +from transformer_lens.components.mlps.gated_mlp import GatedMLP +from transformer_lens.components.mlps.gated_mlp_4bit import GatedMLP4Bit +from transformer_lens.components.mlps.mlp import MLP +from transformer_lens.components.mlps.moe import MoE +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +class MLPFactory: + @staticmethod + def create_mlp(cfg: HookedTransformerConfig) -> CanBeUsedAsMLP: + if cfg.num_experts: + return MoE(cfg) + elif cfg.gated_mlp: + return GatedMLP(cfg) if not cfg.load_in_4bit else GatedMLP4Bit(cfg) + else: + return MLP(cfg) diff --git a/transformer_lens/hook_points.py b/transformer_lens/hook_points.py index 05751b6..ec71881 100644 --- a/transformer_lens/hook_points.py +++ b/transformer_lens/hook_points.py @@ -44,7 +44,7 @@ class LensHandle: # Define type aliases -NamesFilter = Optional[Union[Callable[[str], bool], Sequence[str]]] +NamesFilter = Optional[Union[Callable[[str], bool], Sequence[str], str]] @runtime_checkable @@ -117,7 +117,7 @@ def full_hook( _internal_hooks = self._forward_hooks visible_hooks = self.fwd_hooks elif dir == "bwd": - pt_handle = self.register_backward_hook(full_hook) + pt_handle = self.register_full_backward_hook(full_hook) _internal_hooks = self._backward_hooks visible_hooks = self.bwd_hooks else: diff --git a/transformer_lens/loading_from_pretrained.py b/transformer_lens/loading_from_pretrained.py index a0b29c0..7c36efd 100644 --- a/transformer_lens/loading_from_pretrained.py +++ b/transformer_lens/loading_from_pretrained.py @@ -7,15 +7,41 @@ import logging import os import re -from typing import Dict, Optional, Union, cast +from pathlib import Path +from typing import Dict, Optional, Union -import einops import torch from huggingface_hub import HfApi -from transformers import AutoConfig, AutoModelForCausalLM, BertForPreTraining +from transformers import ( + AutoConfig, + AutoModelForCausalLM, + BertForPreTraining, + T5ForConditionalGeneration, +) import transformer_lens.utils as utils from transformer_lens.HookedTransformerConfig import HookedTransformerConfig +from transformer_lens.pretrained.weight_conversions import ( + convert_bert_weights, + convert_bloom_weights, + convert_coder_weights, + convert_gemma_weights, + convert_gpt2_weights, + convert_gptj_weights, + convert_llama_weights, + convert_mingpt_weights, + convert_mistral_weights, + convert_mixtral_weights, + convert_neel_solu_old_weights, + convert_neo_weights, + convert_neox_weights, + convert_opt_weights, + convert_phi3_weights, + convert_phi_weights, + convert_qwen2_weights, + convert_qwen_weights, + convert_t5_weights, +) OFFICIAL_MODEL_NAMES = [ "gpt2", @@ -171,6 +197,12 @@ "Qwen/Qwen1.5-7B-Chat", "Qwen/Qwen1.5-14B", "Qwen/Qwen1.5-14B-Chat", + "Qwen/Qwen2-0.5B", + "Qwen/Qwen2-0.5B-Instruct", + "Qwen/Qwen2-1.5B", + 
"Qwen/Qwen2-1.5B-Instruct", + "Qwen/Qwen2-7B", + "Qwen/Qwen2-7B-Instruct", "microsoft/phi-1", "microsoft/phi-1_5", "microsoft/phi-2", @@ -179,10 +211,19 @@ "google/gemma-7b", "google/gemma-2b-it", "google/gemma-7b-it", + "google/gemma-2-2b", + "google/gemma-2-2b-it", + "google/gemma-2-9b", + "google/gemma-2-9b-it", + "google/gemma-2-27b", + "google/gemma-2-27b-it", "01-ai/Yi-6B", "01-ai/Yi-34B", "01-ai/Yi-6B-Chat", "01-ai/Yi-34B-Chat", + "google-t5/t5-small", + "google-t5/t5-base", + "google-t5/t5-large", "ai-forever/mGPT", ] """Official model names for models on HuggingFace.""" @@ -594,10 +635,19 @@ "google/gemma-7b": ["gemma-7b"], "google/gemma-2b-it": ["gemma-2b-it"], "google/gemma-7b-it": ["gemma-7b-it"], + "google/gemma-2-2b": ["gemma-2-2b"], + "google/gemma-2-9b": ["gemma-2-9b"], + "google/gemma-2-27b": ["gemma-2-27b"], + "google/gemma-2-2b-it": ["gemma-2-2b-it"], + "google/gemma-2-9b-it": ["gemma-2-9b-it"], + "google/gemma-2-27b-it": ["gemma-2-27b-it"], "01-ai/Yi-6B": ["yi-6b", "Yi-6B"], "01-ai/Yi-34B": ["yi-34b", "Yi-34B"], "01-ai/Yi-6B-Chat": ["yi-6b-chat", "Yi-6B-Chat"], "01-ai/Yi-34B-Chat": ["yi-34b-chat", "Yi-34B-Chat"], + "google-t5/t5-small": ["t5-small"], + "google-t5/t5-base": ["t5-base"], + "google-t5/t5-large": ["t5-large"], "ai-forever/mGPT": ["mGPT"], } """Model aliases for models on HuggingFace.""" @@ -659,10 +709,17 @@ def convert_hf_model_config(model_name: str, **kwargs): Takes the official_model_name as an input. """ # In case the user passed in an alias - official_model_name = get_official_model_name(model_name) + if (Path(model_name) / "config.json").exists(): + logging.info("Loading model config from local directory") + official_model_name = model_name + else: + official_model_name = get_official_model_name(model_name) + # Load HuggingFace model config if "llama" in official_model_name.lower(): architecture = "LlamaForCausalLM" + elif "gemma-2" in official_model_name.lower(): + architecture = "Gemma2ForCausalLM" elif "gemma" in official_model_name.lower(): architecture = "GemmaForCausalLM" else: @@ -954,16 +1011,18 @@ def convert_hf_model_config(model_name: str, **kwargs): } elif architecture == "MixtralForCausalLM": cfg_dict = { + "dtype": torch.bfloat16, "d_model": hf_config.hidden_size, "d_head": hf_config.hidden_size // hf_config.num_attention_heads, "n_heads": hf_config.num_attention_heads, "d_mlp": hf_config.intermediate_size, "n_layers": hf_config.num_hidden_layers, - "n_ctx": 2048, # hf_config.max_position_embeddings, # Capped due to memory issues + "n_ctx": hf_config.max_position_embeddings, # Capped due to memory issues "d_vocab": hf_config.vocab_size, "act_fn": hf_config.hidden_act, "normalization_type": "RMS", "positional_embedding_type": "rotary", + "rotary_base": hf_config.rope_theta, "window_size": hf_config.sliding_window, # This is None, as no sliding window was used "attn_types": ["global"] * 32, "eps": hf_config.rms_norm_eps, @@ -1062,6 +1121,7 @@ def convert_hf_model_config(model_name: str, **kwargs): "d_model": hf_config.hidden_size, "d_head": hf_config.hidden_size // hf_config.num_attention_heads, "n_heads": hf_config.num_attention_heads, + "n_key_value_heads": hf_config.num_key_value_heads, "d_mlp": hf_config.intermediate_size, "n_layers": hf_config.num_hidden_layers, "n_ctx": 2048, # Capped bc the actual ctx length is 30k and the attn mask would be too big @@ -1168,6 +1228,107 @@ def convert_hf_model_config(model_name: str, **kwargs): "gated_mlp": True, "final_rms": True, } + elif official_model_name.startswith("google/gemma-2-2b"): + # 
Architecture for Gemma-2 2b and Gemma-2 2b Instruct models + cfg_dict = { + "d_model": 2304, + "d_head": 256, + "n_heads": 8, + "d_mlp": 9216, + "n_layers": 26, + "n_ctx": 8192, + "eps": 1e-06, + "d_vocab": 256000, + "act_fn": "gelu_pytorch_tanh", + "initializer_range": 0.02, + "normalization_type": "RMS", + "rotary_base": 10000.0, + "positional_embedding_type": "rotary", + "use_attn_scale": True, + "n_key_value_heads": 4, + "window_size": 4096, + "use_local_attn": True, + "attn_types": ["global", "local"] * 21, # Alternate global and local attn + "attn_scores_soft_cap": 50.0, + "output_logits_soft_cap": 30.0, + "gated_mlp": True, + "final_rms": True, + "use_normalization_before_and_after": True, + } + elif official_model_name.startswith("google/gemma-2-9b"): + # Architecture for Gemma-2 9b and Gemma-2 9b Instruct models + cfg_dict = { + "d_model": 3584, + "d_head": 256, + "n_heads": 16, + "d_mlp": 14336, + "n_layers": 42, + "n_ctx": 8192, + "eps": 1e-06, + "d_vocab": 256000, + "act_fn": "gelu_pytorch_tanh", + "initializer_range": 0.02, + "normalization_type": "RMS", + "rotary_base": 10000.0, + "positional_embedding_type": "rotary", + "use_attn_scale": True, + "n_key_value_heads": 8, + "window_size": 4096, + "use_local_attn": True, + "attn_types": ["global", "local"] * 21, # Alternate global and local attn + "attn_scores_soft_cap": 50.0, + "output_logits_soft_cap": 30.0, + "gated_mlp": True, + "final_rms": True, + "use_normalization_before_and_after": True, + } + elif official_model_name.startswith("google/gemma-2-27b"): + # Architecture for Gemma-2 27b and Gemma-2 27b Instruct models + cfg_dict = { + "d_model": 4608, + "d_head": 128, + "n_heads": 32, + "d_mlp": 36864, + "n_layers": 46, + "n_ctx": 8192, + "eps": 1e-06, + "d_vocab": 256000, + "act_fn": "gelu_pytorch_tanh", + "initializer_range": 0.02, + "normalization_type": "RMS", + "rotary_base": 10000.0, + "positional_embedding_type": "rotary", + "use_attn_scale": True, + "attn_scale": 12.0, + "n_key_value_heads": 16, + "window_size": 4096, + "use_local_attn": True, + "attn_types": ["global", "local"] * 23, # Alternate global and local attn + "attn_scores_soft_cap": 50.0, + "output_logits_soft_cap": 30.0, + "gated_mlp": True, + "final_rms": True, + "use_normalization_before_and_after": True, + } + elif architecture == "T5ForConditionalGeneration": + cfg_dict = { + "d_model": hf_config.d_model, + "d_head": hf_config.d_kv, + "n_heads": hf_config.num_heads, + "d_mlp": hf_config.d_ff, + "d_vocab": hf_config.vocab_size, + "n_layers": hf_config.num_layers, + "n_ctx": hf_config.max_length, + "eps": hf_config.layer_norm_epsilon, + "act_fn": hf_config.feed_forward_proj, + "positional_embedding_type": "relative_positional_bias", + "relative_attention_max_distance": hf_config.relative_attention_max_distance, + "relative_attention_num_buckets": hf_config.relative_attention_num_buckets, + "decoder_start_token_id": hf_config.decoder_start_token_id, + "attention_dir": "bidirectional", + "use_attn_scale": False, + "tie_word_embeddings": hf_config.tie_word_embeddings, + } else: raise NotImplementedError(f"{architecture} is not currently supported.") # All of these models use LayerNorm @@ -1266,7 +1427,12 @@ def get_pretrained_model_config( Also given to other HuggingFace functions when compatible. 
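The three Gemma-2 entries above introduce fields such as `attn_scores_soft_cap`, `output_logits_soft_cap`, and `use_normalization_before_and_after`. A hedged way to sanity-check them without touching model weights is to build just the config, as sketched below; `get_pretrained_model_config` is the function defined in this module, and the `gemma-2-2b` alias comes from the alias table added earlier in this file.

```python
from transformer_lens.loading_from_pretrained import get_pretrained_model_config

cfg = get_pretrained_model_config("gemma-2-2b")  # resolves to google/gemma-2-2b
print(cfg.n_layers, cfg.d_model, cfg.n_key_value_heads)
print(cfg.use_normalization_before_and_after, cfg.attn_scores_soft_cap, cfg.output_logits_soft_cap)
# Full weights can then be loaded separately (e.g. HookedTransformer.from_pretrained),
# which routes through convert_gemma_weights for the Gemma2ForCausalLM architecture.
```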
""" - official_model_name = get_official_model_name(model_name) + if Path(model_name).exists(): + # If the model_name is a path, it's a local model + cfg_dict = convert_hf_model_config(model_name, **kwargs) + official_model_name = model_name + else: + official_model_name = get_official_model_name(model_name) if ( official_model_name.startswith("NeelNanda") or official_model_name.startswith("ArthurConmy") @@ -1422,7 +1588,11 @@ def get_pretrained_state_dict( if "torch_dtype" in kwargs: dtype = kwargs["torch_dtype"] del kwargs["torch_dtype"] - official_model_name = get_official_model_name(official_model_name) + if Path(official_model_name).exists(): + official_model_name = str(Path(official_model_name).resolve()) + logging.info(f"Loading model from local path {official_model_name}") + else: + official_model_name = get_official_model_name(official_model_name) if official_model_name.startswith(NEED_REMOTE_CODE_MODELS) and not kwargs.get( "trust_remote_code", False ): @@ -1488,6 +1658,13 @@ def get_pretrained_state_dict( token=huggingface_token, **kwargs, ) + elif "t5" in official_model_name: + hf_model = T5ForConditionalGeneration.from_pretrained( + official_model_name, + torch_dtype=dtype, + token=huggingface_token, + **kwargs, + ) else: hf_model = AutoModelForCausalLM.from_pretrained( official_model_name, @@ -1515,6 +1692,8 @@ def get_pretrained_state_dict( state_dict = convert_llama_weights(hf_model, cfg) elif cfg.original_architecture == "BertForMaskedLM": state_dict = convert_bert_weights(hf_model, cfg) + elif cfg.original_architecture == "T5ForConditionalGeneration": + state_dict = convert_t5_weights(hf_model, cfg) elif cfg.original_architecture == "MistralForCausalLM": state_dict = convert_mistral_weights(hf_model, cfg) elif cfg.original_architecture == "MixtralForCausalLM": @@ -1533,6 +1712,8 @@ def get_pretrained_state_dict( state_dict = convert_phi3_weights(hf_model, cfg) elif cfg.original_architecture == "GemmaForCausalLM": state_dict = convert_gemma_weights(hf_model, cfg) + elif cfg.original_architecture == "Gemma2ForCausalLM": + state_dict = convert_gemma_weights(hf_model, cfg) else: raise ValueError( f"Loading weights from the architecture is not currently supported: {cfg.original_architecture}, generated from model name {cfg.model_name}. Feel free to open an issue on GitHub to request this feature." 
@@ -1571,1177 +1752,6 @@ def fill_missing_keys(model, state_dict): return state_dict -# Convert state dicts -def convert_gpt2_weights(gpt2, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = gpt2.transformer.wte.weight - state_dict["pos_embed.W_pos"] = gpt2.transformer.wpe.weight - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = gpt2.transformer.h[l].ln_1.weight - state_dict[f"blocks.{l}.ln1.b"] = gpt2.transformer.h[l].ln_1.bias - - # In GPT-2, q,k,v are produced by one big linear map, whose output is - # concat([q, k, v]) - W = gpt2.transformer.h[l].attn.c_attn.weight - W_Q, W_K, W_V = torch.tensor_split(W, 3, dim=1) - W_Q = einops.rearrange(W_Q, "m (i h)->i m h", i=cfg.n_heads) - W_K = einops.rearrange(W_K, "m (i h)->i m h", i=cfg.n_heads) - W_V = einops.rearrange(W_V, "m (i h)->i m h", i=cfg.n_heads) - - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - qkv_bias = gpt2.transformer.h[l].attn.c_attn.bias - qkv_bias = einops.rearrange( - qkv_bias, - "(qkv index head)->qkv index head", - qkv=3, - index=cfg.n_heads, - head=cfg.d_head, - ) - state_dict[f"blocks.{l}.attn.b_Q"] = qkv_bias[0] - state_dict[f"blocks.{l}.attn.b_K"] = qkv_bias[1] - state_dict[f"blocks.{l}.attn.b_V"] = qkv_bias[2] - - W_O = gpt2.transformer.h[l].attn.c_proj.weight - W_O = einops.rearrange(W_O, "(i h) m->i h m", i=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - state_dict[f"blocks.{l}.attn.b_O"] = gpt2.transformer.h[l].attn.c_proj.bias - - state_dict[f"blocks.{l}.ln2.w"] = gpt2.transformer.h[l].ln_2.weight - state_dict[f"blocks.{l}.ln2.b"] = gpt2.transformer.h[l].ln_2.bias - - W_in = gpt2.transformer.h[l].mlp.c_fc.weight - state_dict[f"blocks.{l}.mlp.W_in"] = W_in - state_dict[f"blocks.{l}.mlp.b_in"] = gpt2.transformer.h[l].mlp.c_fc.bias - - W_out = gpt2.transformer.h[l].mlp.c_proj.weight - state_dict[f"blocks.{l}.mlp.W_out"] = W_out - state_dict[f"blocks.{l}.mlp.b_out"] = gpt2.transformer.h[l].mlp.c_proj.bias - state_dict["unembed.W_U"] = gpt2.lm_head.weight.T - - state_dict["ln_final.w"] = gpt2.transformer.ln_f.weight - state_dict["ln_final.b"] = gpt2.transformer.ln_f.bias - return state_dict - - -def convert_neo_weights(neo, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = neo.transformer.wte.weight - state_dict["pos_embed.W_pos"] = neo.transformer.wpe.weight - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = neo.transformer.h[l].ln_1.weight - state_dict[f"blocks.{l}.ln1.b"] = neo.transformer.h[l].ln_1.bias - - W_Q = neo.transformer.h[l].attn.attention.q_proj.weight - W_K = neo.transformer.h[l].attn.attention.k_proj.weight - W_V = neo.transformer.h[l].attn.attention.v_proj.weight - W_Q = einops.rearrange(W_Q, "(i h) m->i m h", i=cfg.n_heads) - W_K = einops.rearrange(W_K, "(i h) m->i m h", i=cfg.n_heads) - W_V = einops.rearrange(W_V, "(i h) m->i m h", i=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - state_dict[f"blocks.{l}.attn.b_K"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - state_dict[f"blocks.{l}.attn.b_V"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - - W_O = neo.transformer.h[l].attn.attention.out_proj.weight - W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = 
W_O - state_dict[f"blocks.{l}.attn.b_O"] = neo.transformer.h[l].attn.attention.out_proj.bias - - state_dict[f"blocks.{l}.ln2.w"] = neo.transformer.h[l].ln_2.weight - state_dict[f"blocks.{l}.ln2.b"] = neo.transformer.h[l].ln_2.bias - - state_dict[f"blocks.{l}.mlp.W_in"] = neo.transformer.h[l].mlp.c_fc.weight.T - state_dict[f"blocks.{l}.mlp.b_in"] = neo.transformer.h[l].mlp.c_fc.bias - - state_dict[f"blocks.{l}.mlp.W_out"] = neo.transformer.h[l].mlp.c_proj.weight.T - state_dict[f"blocks.{l}.mlp.b_out"] = neo.transformer.h[l].mlp.c_proj.bias - state_dict["ln_final.w"] = neo.transformer.ln_f.weight - state_dict["ln_final.b"] = neo.transformer.ln_f.bias - - state_dict["unembed.W_U"] = neo.lm_head.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - return state_dict - - -def convert_gptj_weights(gptj, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = gptj.transformer.wte.weight - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = gptj.transformer.h[l].ln_1.weight - state_dict[f"blocks.{l}.ln1.b"] = gptj.transformer.h[l].ln_1.bias - - W_Q = gptj.transformer.h[l].attn.q_proj.weight - W_K = gptj.transformer.h[l].attn.k_proj.weight - W_V = gptj.transformer.h[l].attn.v_proj.weight - W_Q = einops.rearrange(W_Q, "(i h) m->i m h", i=cfg.n_heads) - W_K = einops.rearrange(W_K, "(i h) m->i m h", i=cfg.n_heads) - W_V = einops.rearrange(W_V, "(i h) m->i m h", i=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - state_dict[f"blocks.{l}.attn.b_K"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - state_dict[f"blocks.{l}.attn.b_V"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - - W_O = gptj.transformer.h[l].attn.out_proj.weight - W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - # Layer Norm 1 and 2 are tied. - state_dict[f"blocks.{l}.ln2.w"] = state_dict[f"blocks.{l}.ln1.w"] - state_dict[f"blocks.{l}.ln2.b"] = state_dict[f"blocks.{l}.ln1.b"] - - state_dict[f"blocks.{l}.mlp.W_in"] = gptj.transformer.h[l].mlp.fc_in.weight.T - state_dict[f"blocks.{l}.mlp.b_in"] = gptj.transformer.h[l].mlp.fc_in.bias - - state_dict[f"blocks.{l}.mlp.W_out"] = gptj.transformer.h[l].mlp.fc_out.weight.T - state_dict[f"blocks.{l}.mlp.b_out"] = gptj.transformer.h[l].mlp.fc_out.bias - state_dict["ln_final.w"] = gptj.transformer.ln_f.weight - state_dict["ln_final.b"] = gptj.transformer.ln_f.bias - - state_dict["unembed.W_U"] = gptj.lm_head.weight.T - # Contains a bias, for some reason? - state_dict["unembed.b_U"] = gptj.lm_head.bias - return state_dict - - -def convert_neox_weights(neox, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = neox.gpt_neox.embed_in.weight - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = neox.gpt_neox.layers[l].input_layernorm.weight - state_dict[f"blocks.{l}.ln1.b"] = neox.gpt_neox.layers[l].input_layernorm.bias - - # For some inexplicable reason, NeoX both uses the concatenated QKV - # matmul of GPT-2 (afaict this has a neglible performance impact) AND - # has the flattened axis in the DIFFERENT order of (head_index qkv - # d_head) - this took me an hour to debug... 
- W = neox.gpt_neox.layers[l].attention.query_key_value.weight - W = einops.rearrange(W, "(i qkv h) m->qkv i m h", i=cfg.n_heads, qkv=3) - - # Fold in layer norm weights - state_dict[f"blocks.{l}.attn.W_Q"] = W[0] - state_dict[f"blocks.{l}.attn.W_K"] = W[1] - state_dict[f"blocks.{l}.attn.W_V"] = W[2] - - qkv_bias = neox.gpt_neox.layers[l].attention.query_key_value.bias - qkv_bias = einops.rearrange( - qkv_bias, - "(index qkv head)->qkv index head", - qkv=3, - index=cfg.n_heads, - head=cfg.d_head, - ) - # Fold in layer norm biases - state_dict[f"blocks.{l}.attn.b_Q"] = qkv_bias[0] - state_dict[f"blocks.{l}.attn.b_K"] = qkv_bias[1] - state_dict[f"blocks.{l}.attn.b_V"] = qkv_bias[2] - - W_O = neox.gpt_neox.layers[l].attention.dense.weight - W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - state_dict[f"blocks.{l}.attn.b_O"] = neox.gpt_neox.layers[l].attention.dense.bias - - state_dict[f"blocks.{l}.ln2.w"] = neox.gpt_neox.layers[l].post_attention_layernorm.weight - state_dict[f"blocks.{l}.ln2.b"] = neox.gpt_neox.layers[l].post_attention_layernorm.bias - - state_dict[f"blocks.{l}.mlp.W_in"] = neox.gpt_neox.layers[l].mlp.dense_h_to_4h.weight.T - state_dict[f"blocks.{l}.mlp.b_in"] = neox.gpt_neox.layers[l].mlp.dense_h_to_4h.bias - - state_dict[f"blocks.{l}.mlp.W_out"] = neox.gpt_neox.layers[l].mlp.dense_4h_to_h.weight.T - state_dict[f"blocks.{l}.mlp.b_out"] = neox.gpt_neox.layers[l].mlp.dense_4h_to_h.bias - state_dict["ln_final.w"] = neox.gpt_neox.final_layer_norm.weight - state_dict["ln_final.b"] = neox.gpt_neox.final_layer_norm.bias - - state_dict["unembed.W_U"] = neox.embed_out.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - return state_dict - - -def convert_llama_weights(llama, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = llama.model.embed_tokens.weight - - # Some models with the Llama architecture use Grouped Query Attention, and so for these we need to modify - # the state dict keys for the K/V attention weight/biases, prepending "_" to the key names. 
- using_gqa = cfg.n_key_value_heads is not None - gqa_uscore = "_" if using_gqa else "" - # need a cast since MyPy isn't smart enough to realize that using_gqa implies n_key_value_heads is not None - n_kv_heads = cast(int, cfg.n_key_value_heads if using_gqa else cfg.n_heads) - - # llama has no biases anywhere and deals with everything else roughly like - # GPTNeoX with different names - - assert cfg.d_mlp is not None # keep mypy happy - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = llama.model.layers[l].input_layernorm.weight - - W_Q = llama.model.layers[l].self_attn.q_proj.weight - W_K = llama.model.layers[l].self_attn.k_proj.weight - W_V = llama.model.layers[l].self_attn.v_proj.weight - - # in case of quantization, - # parameters should stay as bitsandbytes.nn.modules.Params4bit - if not cfg.load_in_4bit: - W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) - W_K = einops.rearrange(W_K, "(n h) m->n m h", n=n_kv_heads) - W_V = einops.rearrange(W_V, "(n h) m->n m h", n=n_kv_heads) - - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.{gqa_uscore}W_K"] = W_K - state_dict[f"blocks.{l}.attn.{gqa_uscore}W_V"] = W_V - - state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros( - cfg.n_heads, cfg.d_head, dtype=cfg.dtype, device=cfg.device - ) - state_dict[f"blocks.{l}.attn.{gqa_uscore}b_K"] = torch.zeros( - n_kv_heads, - cfg.d_head, - dtype=cfg.dtype, - device=cfg.device, - ) - state_dict[f"blocks.{l}.attn.{gqa_uscore}b_V"] = torch.zeros( - n_kv_heads, - cfg.d_head, - dtype=cfg.dtype, - device=cfg.device, - ) - - W_O = llama.model.layers[l].self_attn.o_proj.weight - - if not cfg.load_in_4bit: - W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) - - state_dict[f"blocks.{l}.attn.W_O"] = W_O.to(device=cfg.device) - - state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros( - cfg.d_model, dtype=cfg.dtype, device=cfg.device - ) - - state_dict[f"blocks.{l}.ln2.w"] = llama.model.layers[l].post_attention_layernorm.weight - - # in case of quantization, - # parameters should stay as bitsandbytes.nn.modules.Params4bit - if not cfg.load_in_4bit: - state_dict[f"blocks.{l}.mlp.W_in"] = llama.model.layers[l].mlp.up_proj.weight.T - state_dict[f"blocks.{l}.mlp.W_gate"] = llama.model.layers[l].mlp.gate_proj.weight.T - state_dict[f"blocks.{l}.mlp.W_out"] = llama.model.layers[l].mlp.down_proj.weight.T - else: - state_dict[f"blocks.{l}.mlp.W_in"] = llama.model.layers[l].mlp.up_proj.weight - state_dict[f"blocks.{l}.mlp.W_gate"] = llama.model.layers[l].mlp.gate_proj.weight - state_dict[f"blocks.{l}.mlp.W_out"] = llama.model.layers[l].mlp.down_proj.weight - - state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros( - cfg.d_mlp, dtype=cfg.dtype, device=cfg.device - ) - state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros( - cfg.d_model, dtype=cfg.dtype, device=cfg.device - ) - - state_dict["ln_final.w"] = llama.model.norm.weight - - state_dict["unembed.W_U"] = llama.lm_head.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype, device=cfg.device) - - return state_dict - - -def convert_qwen_weights(qwen, cfg: HookedTransformerConfig): - state_dict = {} - model = qwen.transformer - state_dict["embed.W_E"] = model.wte.weight - - assert cfg.d_mlp is not None # keep mypy happy - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = model.h[l].ln_1.weight - - W_Q, W_K, W_V = model.h[l].attn.c_attn.weight.split(split_size=cfg.d_model, dim=0) - W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) - W_K = einops.rearrange(W_K, "(n h) m->n m 
h", n=cfg.n_heads) - W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - b_Q, b_K, b_V = model.h[l].attn.c_attn.bias.split(split_size=cfg.d_model, dim=0) - b_Q = einops.rearrange( - b_Q, - "(n_head d_head) -> n_head d_head", - n_head=cfg.n_heads, - ) - b_K = einops.rearrange( - b_K, - "(n_head d_head) -> n_head d_head", - n_head=cfg.n_heads, - ) - b_V = einops.rearrange( - b_V, - "(n_head d_head) -> n_head d_head", - n_head=cfg.n_heads, - ) - state_dict[f"blocks.{l}.attn.b_Q"] = b_Q - state_dict[f"blocks.{l}.attn.b_K"] = b_K - state_dict[f"blocks.{l}.attn.b_V"] = b_V - - W_O = model.h[l].attn.c_proj.weight - W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - - state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - state_dict[f"blocks.{l}.ln2.w"] = model.h[l].ln_2.weight - - state_dict[f"blocks.{l}.mlp.W_in"] = model.h[l].mlp.w1.weight.T - state_dict[f"blocks.{l}.mlp.W_gate"] = model.h[l].mlp.w2.weight.T - state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype) - - state_dict[f"blocks.{l}.mlp.W_out"] = model.h[l].mlp.c_proj.weight.T - state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - state_dict["ln_final.w"] = model.ln_f.weight - - state_dict["unembed.W_U"] = qwen.lm_head.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - - return state_dict - - -def convert_qwen2_weights(qwen, cfg: HookedTransformerConfig): - # Note that this method is also applied for Qwen1.5 models, since they - # have architecture type Qwen2ForCausalLM. - - state_dict = {} - - state_dict["embed.W_E"] = qwen.model.embed_tokens.weight - - assert cfg.d_mlp is not None # keep mypy happy - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = qwen.model.layers[l].input_layernorm.weight - - W_Q = qwen.model.layers[l].self_attn.q_proj.weight - W_K = qwen.model.layers[l].self_attn.k_proj.weight - W_V = qwen.model.layers[l].self_attn.v_proj.weight - W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) - W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_heads) - W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_heads) - - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - b_Q = qwen.model.layers[l].self_attn.q_proj.bias - b_Q = einops.rearrange( - b_Q, - "(n_head d_head) -> n_head d_head", - n_head=cfg.n_heads, - ) - - b_K = qwen.model.layers[l].self_attn.k_proj.bias - b_K = einops.rearrange( - b_K, - "(n_head d_head) -> n_head d_head", - n_head=cfg.n_heads, - ) - - b_V = qwen.model.layers[l].self_attn.v_proj.bias - b_V = einops.rearrange( - b_V, - "(n_head d_head) -> n_head d_head", - n_head=cfg.n_heads, - ) - - state_dict[f"blocks.{l}.attn.b_Q"] = b_Q - state_dict[f"blocks.{l}.attn.b_K"] = b_K - state_dict[f"blocks.{l}.attn.b_V"] = b_V - - W_O = qwen.model.layers[l].self_attn.o_proj.weight - W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - - state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - state_dict[f"blocks.{l}.ln2.w"] = qwen.model.layers[l].post_attention_layernorm.weight - - state_dict[f"blocks.{l}.mlp.W_in"] = qwen.model.layers[l].mlp.up_proj.weight.T - state_dict[f"blocks.{l}.mlp.W_gate"] = 
qwen.model.layers[l].mlp.gate_proj.weight.T - state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype) - - state_dict[f"blocks.{l}.mlp.W_out"] = qwen.model.layers[l].mlp.down_proj.weight.T - state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - state_dict["ln_final.w"] = qwen.model.norm.weight - - state_dict["unembed.W_U"] = qwen.lm_head.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - - return state_dict - - -def convert_mistral_weights(mistral, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = mistral.model.embed_tokens.weight - - assert cfg.n_key_value_heads is not None # keep mypy happy - assert cfg.d_mlp is not None # keep mypy happy - - # Mistral has no biases anywhere - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = mistral.model.layers[l].input_layernorm.weight - - W_Q = mistral.model.layers[l].self_attn.q_proj.weight - W_K = mistral.model.layers[l].self_attn.k_proj.weight - W_V = mistral.model.layers[l].self_attn.v_proj.weight - W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) - W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads) - W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn._W_K"] = W_K - state_dict[f"blocks.{l}.attn._W_V"] = W_V - - state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - state_dict[f"blocks.{l}.attn._b_K"] = torch.zeros( - cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype - ) - state_dict[f"blocks.{l}.attn._b_V"] = torch.zeros( - cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype - ) - - W_O = mistral.model.layers[l].self_attn.o_proj.weight - W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - - state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - state_dict[f"blocks.{l}.ln2.w"] = mistral.model.layers[l].post_attention_layernorm.weight - - state_dict[f"blocks.{l}.mlp.W_in"] = mistral.model.layers[l].mlp.up_proj.weight.T - state_dict[f"blocks.{l}.mlp.W_gate"] = mistral.model.layers[l].mlp.gate_proj.weight.T - state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype) - - state_dict[f"blocks.{l}.mlp.W_out"] = mistral.model.layers[l].mlp.down_proj.weight.T - state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - state_dict["ln_final.w"] = mistral.model.norm.weight - - state_dict["unembed.W_U"] = mistral.lm_head.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - - return state_dict - - -def convert_mixtral_weights(mixtral, cfg: HookedTransformerConfig): - # The same as Mistral, but with the MLP replaced with MoE - # As with Mistral, Mixtral has no biases - - state_dict = {} - - assert cfg.n_key_value_heads is not None # keep mypy happy - assert cfg.d_mlp is not None - assert cfg.num_experts is not None - - state_dict["embed.W_E"] = mixtral.model.embed_tokens.weight - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = mixtral.model.layers[l].input_layernorm.weight - - W_Q = mixtral.model.layers[l].self_attn.q_proj.weight - W_K = mixtral.model.layers[l].self_attn.k_proj.weight - W_V = mixtral.model.layers[l].self_attn.v_proj.weight - W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) - W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads) - W_V = einops.rearrange(W_V, 
"(n h) m->n m h", n=cfg.n_key_value_heads) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn._W_K"] = W_K - state_dict[f"blocks.{l}.attn._W_V"] = W_V - - state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - state_dict[f"blocks.{l}.attn._b_K"] = torch.zeros( - cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype - ) - state_dict[f"blocks.{l}.attn._b_V"] = torch.zeros( - cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype - ) - - W_O = mixtral.model.layers[l].self_attn.o_proj.weight - W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - - state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - state_dict[f"blocks.{l}.ln2.w"] = mixtral.model.layers[l].post_attention_layernorm.weight - - state_dict[f"blocks.{l}.mlp.W_gate"] = mixtral.model.layers[ - l - ].block_sparse_moe.gate.weight.T - - # The mapping here from wn to W_{in/out/gate} is a bit confusing: - # w1 -> W_gate - # w2 -> W_out - # w3 -> W_in - # See https://github.com/mistralai/mistral-src/blob/main/mistral/model.py#L128 for reference - for e in range(cfg.num_experts): - state_dict[f"blocks.{l}.mlp.experts.{e}.W_in"] = ( - mixtral.model.layers[l].block_sparse_moe.experts[e].w3.weight.T - ) - state_dict[f"blocks.{l}.mlp.experts.{e}.W_gate"] = ( - mixtral.model.layers[l].block_sparse_moe.experts[e].w1.weight.T - ) - state_dict[f"blocks.{l}.mlp.experts.{e}.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype) - state_dict[f"blocks.{l}.mlp.experts.{e}.W_out"] = ( - mixtral.model.layers[l].block_sparse_moe.experts[e].w2.weight.T - ) - state_dict[f"blocks.{l}.mlp.experts.{e}.b_out"] = torch.zeros( - cfg.d_model, dtype=cfg.dtype - ) - - state_dict["ln_final.w"] = mixtral.model.norm.weight.data - - state_dict["unembed.W_U"] = mixtral.lm_head.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - - return state_dict - - -def convert_opt_weights(opt, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = opt.model.decoder.embed_tokens.weight - state_dict["pos_embed.W_pos"] = opt.model.decoder.embed_positions.weight[2:, :] - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = opt.model.decoder.layers[l].self_attn_layer_norm.weight - state_dict[f"blocks.{l}.ln1.b"] = opt.model.decoder.layers[l].self_attn_layer_norm.bias - - W_Q = opt.model.decoder.layers[l].self_attn.q_proj.weight - W_K = opt.model.decoder.layers[l].self_attn.k_proj.weight - W_V = opt.model.decoder.layers[l].self_attn.v_proj.weight - W_Q = einops.rearrange( - W_Q, - "(index d_head) d_model->index d_model d_head", - index=cfg.n_heads, - ) - W_K = einops.rearrange( - W_K, - "(index d_head) d_model->index d_model d_head", - index=cfg.n_heads, - ) - W_V = einops.rearrange( - W_V, - "(index d_head) d_model->index d_model d_head", - index=cfg.n_heads, - ) - - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - q_bias = einops.rearrange( - opt.model.decoder.layers[l].self_attn.q_proj.bias, - "(head_index d_head)->head_index d_head", - head_index=cfg.n_heads, - d_head=cfg.d_head, - ) - k_bias = einops.rearrange( - opt.model.decoder.layers[l].self_attn.k_proj.bias, - "(head_index d_head)->head_index d_head", - head_index=cfg.n_heads, - d_head=cfg.d_head, - ) - v_bias = einops.rearrange( - opt.model.decoder.layers[l].self_attn.v_proj.bias, - "(head_index d_head)->head_index d_head", - head_index=cfg.n_heads, - 
d_head=cfg.d_head, - ) - - state_dict[f"blocks.{l}.attn.b_Q"] = q_bias - state_dict[f"blocks.{l}.attn.b_K"] = k_bias - state_dict[f"blocks.{l}.attn.b_V"] = v_bias - - W_O = opt.model.decoder.layers[l].self_attn.out_proj.weight - W_O = einops.rearrange( - W_O, - "d_model (index d_head)->index d_head d_model", - index=cfg.n_heads, - ) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - state_dict[f"blocks.{l}.attn.b_O"] = opt.model.decoder.layers[l].self_attn.out_proj.bias - - state_dict[f"blocks.{l}.ln2.w"] = opt.model.decoder.layers[l].final_layer_norm.weight - state_dict[f"blocks.{l}.ln2.b"] = opt.model.decoder.layers[l].final_layer_norm.bias - - state_dict[f"blocks.{l}.mlp.W_in"] = opt.model.decoder.layers[l].fc1.weight.T - state_dict[f"blocks.{l}.mlp.W_out"] = opt.model.decoder.layers[l].fc2.weight.T - - state_dict[f"blocks.{l}.mlp.b_in"] = opt.model.decoder.layers[l].fc1.bias - state_dict[f"blocks.{l}.mlp.b_out"] = opt.model.decoder.layers[l].fc2.bias - state_dict["ln_final.w"] = opt.model.decoder.final_layer_norm.weight - state_dict["ln_final.b"] = opt.model.decoder.final_layer_norm.bias - state_dict["unembed.W_U"] = opt.lm_head.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - return state_dict - - -def convert_neel_solu_old_weights(state_dict: dict, cfg: HookedTransformerConfig): - """ - Converts the weights of my old SoLU models to the HookedTransformer format. - Takes as input a state dict, *not* a model object. - - There are a bunch of dumb bugs in the original code, sorry! - - Models 1L, 2L, 4L and 6L have left facing weights (ie, weights have shape - [dim_out, dim_in]) while HookedTransformer does right facing (ie [dim_in, - dim_out]). - - 8L has *just* a left facing W_pos, the rest right facing. - - And some models were trained with - """ - # Early models have left facing W_pos - reverse_pos = cfg.n_layers <= 8 - - # Models prior to 8L have left facing everything (8L has JUST left facing W_pos - sorry! Stupid bug) - reverse_weights = cfg.n_layers <= 6 - - new_state_dict = {} - for k, v in state_dict.items(): - k = k.replace("norm", "ln") - if k.startswith("ln."): - k = k.replace("ln.", "ln_final.") - new_state_dict[k] = v - - if reverse_pos: - new_state_dict["pos_embed.W_pos"] = new_state_dict["pos_embed.W_pos"].T - if reverse_weights: - for k, v in new_state_dict.items(): - if "W_" in k and "W_pos" not in k: - new_state_dict[k] = v.transpose(-2, -1) - return new_state_dict - - -def convert_mingpt_weights(old_state_dict, cfg: HookedTransformerConfig): - # mingpt (https://github.com/karpathy/minGPT) is mostly similar to GPT-2, - # but doesn't concat the QKV matrices. 
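    # In minGPT each of query/key/value is its own nn.Linear with weight shape
    # [n_heads * d_head, d_model]; the rearranges below split out the head dimension
    # and transpose into HookedTransformer's [n_heads, d_model, d_head] layout.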
- state_dict = {} - - state_dict["embed.W_E"] = old_state_dict["tok_emb.weight"] - state_dict["pos_embed.W_pos"] = old_state_dict["pos_emb"].squeeze() - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = old_state_dict[f"blocks.{l}.ln1.weight"] - state_dict[f"blocks.{l}.ln1.b"] = old_state_dict[f"blocks.{l}.ln1.bias"] - - W_Q = old_state_dict[f"blocks.{l}.attn.query.weight"] - W_K = old_state_dict[f"blocks.{l}.attn.key.weight"] - W_V = old_state_dict[f"blocks.{l}.attn.value.weight"] - W_Q = einops.rearrange(W_Q, "(i h) m->i m h", i=cfg.n_heads) - W_K = einops.rearrange(W_K, "(i h) m->i m h", i=cfg.n_heads) - W_V = einops.rearrange(W_V, "(i h) m->i m h", i=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - q_bias = einops.rearrange( - old_state_dict[f"blocks.{l}.attn.query.bias"], "(i h)->i h", i=cfg.n_heads - ) - k_bias = einops.rearrange( - old_state_dict[f"blocks.{l}.attn.key.bias"], "(i h)->i h", i=cfg.n_heads - ) - v_bias = einops.rearrange( - old_state_dict[f"blocks.{l}.attn.value.bias"], "(i h)->i h", i=cfg.n_heads - ) - - state_dict[f"blocks.{l}.attn.b_Q"] = q_bias - state_dict[f"blocks.{l}.attn.b_K"] = k_bias - state_dict[f"blocks.{l}.attn.b_V"] = v_bias - - W_O = old_state_dict[f"blocks.{l}.attn.proj.weight"] - W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - state_dict[f"blocks.{l}.attn.b_O"] = old_state_dict[f"blocks.{l}.attn.proj.bias"] - - state_dict[f"blocks.{l}.ln2.w"] = old_state_dict[f"blocks.{l}.ln2.weight"] - state_dict[f"blocks.{l}.ln2.b"] = old_state_dict[f"blocks.{l}.ln2.bias"] - - W_in = old_state_dict[f"blocks.{l}.mlp.0.weight"] - state_dict[f"blocks.{l}.mlp.W_in"] = W_in.T - state_dict[f"blocks.{l}.mlp.b_in"] = old_state_dict[f"blocks.{l}.mlp.0.bias"] - - W_out = old_state_dict[f"blocks.{l}.mlp.2.weight"] - state_dict[f"blocks.{l}.mlp.W_out"] = W_out.T - state_dict[f"blocks.{l}.mlp.b_out"] = old_state_dict[f"blocks.{l}.mlp.2.bias"] - - state_dict["unembed.W_U"] = old_state_dict["head.weight"].T - - state_dict["ln_final.w"] = old_state_dict["ln_f.weight"] - state_dict["ln_final.b"] = old_state_dict["ln_f.bias"] - - return state_dict - - -def convert_nanogpt_weights(old_state_dict, cfg: HookedTransformerConfig): - """For https://github.com/karpathy/nanoGPT - There are two complications with converting nanogpt models: - The first is that some state dicts have an unwanted prefix on keys that needs to be removed. - The second is that the models can be saved with or without bias. By default, there - is no bias. This function can handle both cases.""" - # Nanogpt models saved after torch.compile() have this unwanted prefix - # This is a simple way to remove it - unwanted_prefix = "_orig_mod." 
- for k, v in list(old_state_dict.items()): - if k.startswith(unwanted_prefix): - old_state_dict[k[len(unwanted_prefix) :]] = old_state_dict.pop(k) - - new_state_dict = {} - new_state_dict["pos_embed.W_pos"] = old_state_dict["transformer.wpe.weight"] - new_state_dict["embed.W_E"] = old_state_dict["transformer.wte.weight"] - - new_state_dict["ln_final.w"] = old_state_dict["transformer.ln_f.weight"] - new_state_dict["ln_final.b"] = torch.zeros_like(old_state_dict["transformer.ln_f.weight"]) - new_state_dict["unembed.W_U"] = old_state_dict["lm_head.weight"].T - - bias = False - if "transformer.ln_f.bias" in old_state_dict: - bias = True - new_state_dict["ln_final.b"] = old_state_dict["transformer.ln_f.bias"] - - for layer in range(cfg.n_layers): - layer_key = f"transformer.h.{layer}" - - new_state_dict[f"blocks.{layer}.ln1.w"] = old_state_dict[f"{layer_key}.ln_1.weight"] - # A bias of zeros is required for folding layer norm - new_state_dict[f"blocks.{layer}.ln1.b"] = torch.zeros_like( - old_state_dict[f"{layer_key}.ln_1.weight"] - ) - new_state_dict[f"blocks.{layer}.ln2.w"] = old_state_dict[f"{layer_key}.ln_2.weight"] - new_state_dict[f"blocks.{layer}.ln2.b"] = torch.zeros_like( - old_state_dict[f"{layer_key}.ln_2.weight"] - ) - - W = old_state_dict[f"{layer_key}.attn.c_attn.weight"] - W_Q, W_K, W_V = torch.tensor_split(W, 3, dim=0) - W_Q = einops.rearrange(W_Q, "(i h) m->i m h", i=cfg.n_heads) - W_K = einops.rearrange(W_K, "(i h) m->i m h", i=cfg.n_heads) - W_V = einops.rearrange(W_V, "(i h) m->i m h", i=cfg.n_heads) - new_state_dict[f"blocks.{layer}.attn.W_Q"] = W_Q - new_state_dict[f"blocks.{layer}.attn.W_K"] = W_K - new_state_dict[f"blocks.{layer}.attn.W_V"] = W_V - - W_O = old_state_dict[f"{layer_key}.attn.c_proj.weight"] - W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) - new_state_dict[f"blocks.{layer}.attn.W_O"] = W_O - - new_state_dict[f"blocks.{layer}.mlp.W_in"] = old_state_dict[ - f"{layer_key}.mlp.c_fc.weight" - ].T - new_state_dict[f"blocks.{layer}.mlp.W_out"] = old_state_dict[ - f"{layer_key}.mlp.c_proj.weight" - ].T - - if bias: - new_state_dict[f"blocks.{layer}.ln1.b"] = old_state_dict[f"{layer_key}.ln_1.bias"] - new_state_dict[f"blocks.{layer}.ln2.b"] = old_state_dict[f"{layer_key}.ln_2.bias"] - new_state_dict[f"blocks.{layer}.mlp.b_in"] = old_state_dict[ - f"{layer_key}.mlp.c_fc.bias" - ] - new_state_dict[f"blocks.{layer}.mlp.b_out"] = old_state_dict[ - f"{layer_key}.mlp.c_proj.bias" - ] - - B = old_state_dict[f"{layer_key}.attn.c_attn.bias"] - B_Q, B_K, B_V = torch.tensor_split(B, 3, dim=0) - B_Q = einops.rearrange(B_Q, "(i h)->i h", i=cfg.n_heads) - B_K = einops.rearrange(B_K, "(i h)->i h", i=cfg.n_heads) - B_V = einops.rearrange(B_V, "(i h)->i h", i=cfg.n_heads) - new_state_dict[f"blocks.{layer}.attn.b_Q"] = B_Q - new_state_dict[f"blocks.{layer}.attn.b_K"] = B_K - new_state_dict[f"blocks.{layer}.attn.b_V"] = B_V - new_state_dict[f"blocks.{layer}.attn.b_O"] = old_state_dict[ - f"{layer_key}.attn.c_proj.bias" - ] - - return new_state_dict - - -def convert_bert_weights(bert, cfg: HookedTransformerConfig): - embeddings = bert.bert.embeddings - state_dict = { - "embed.embed.W_E": embeddings.word_embeddings.weight, - "embed.pos_embed.W_pos": embeddings.position_embeddings.weight, - "embed.token_type_embed.W_token_type": embeddings.token_type_embeddings.weight, - "embed.ln.w": embeddings.LayerNorm.weight, - "embed.ln.b": embeddings.LayerNorm.bias, - } - - for l in range(cfg.n_layers): - block = bert.bert.encoder.layer[l] - state_dict[f"blocks.{l}.attn.W_Q"] = 
einops.rearrange( - block.attention.self.query.weight, "(i h) m -> i m h", i=cfg.n_heads - ) - state_dict[f"blocks.{l}.attn.b_Q"] = einops.rearrange( - block.attention.self.query.bias, "(i h) -> i h", i=cfg.n_heads - ) - state_dict[f"blocks.{l}.attn.W_K"] = einops.rearrange( - block.attention.self.key.weight, "(i h) m -> i m h", i=cfg.n_heads - ) - state_dict[f"blocks.{l}.attn.b_K"] = einops.rearrange( - block.attention.self.key.bias, "(i h) -> i h", i=cfg.n_heads - ) - state_dict[f"blocks.{l}.attn.W_V"] = einops.rearrange( - block.attention.self.value.weight, "(i h) m -> i m h", i=cfg.n_heads - ) - state_dict[f"blocks.{l}.attn.b_V"] = einops.rearrange( - block.attention.self.value.bias, "(i h) -> i h", i=cfg.n_heads - ) - state_dict[f"blocks.{l}.attn.W_O"] = einops.rearrange( - block.attention.output.dense.weight, - "m (i h) -> i h m", - i=cfg.n_heads, - ) - state_dict[f"blocks.{l}.attn.b_O"] = block.attention.output.dense.bias - state_dict[f"blocks.{l}.ln1.w"] = block.attention.output.LayerNorm.weight - state_dict[f"blocks.{l}.ln1.b"] = block.attention.output.LayerNorm.bias - state_dict[f"blocks.{l}.mlp.W_in"] = einops.rearrange( - block.intermediate.dense.weight, "mlp model -> model mlp" - ) - state_dict[f"blocks.{l}.mlp.b_in"] = block.intermediate.dense.bias - state_dict[f"blocks.{l}.mlp.W_out"] = einops.rearrange( - block.output.dense.weight, "model mlp -> mlp model" - ) - state_dict[f"blocks.{l}.mlp.b_out"] = block.output.dense.bias - state_dict[f"blocks.{l}.ln2.w"] = block.output.LayerNorm.weight - state_dict[f"blocks.{l}.ln2.b"] = block.output.LayerNorm.bias - - mlm_head = bert.cls.predictions - state_dict["mlm_head.W"] = mlm_head.transform.dense.weight - state_dict["mlm_head.b"] = mlm_head.transform.dense.bias - state_dict["mlm_head.ln.w"] = mlm_head.transform.LayerNorm.weight - state_dict["mlm_head.ln.b"] = mlm_head.transform.LayerNorm.bias - # Note: BERT uses tied embeddings - state_dict["unembed.W_U"] = embeddings.word_embeddings.weight.T - # "unembed.W_U": mlm_head.decoder.weight.T, - state_dict["unembed.b_U"] = mlm_head.bias - - return state_dict - - -def convert_bloom_weights(bloom, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = bloom.transformer.word_embeddings.weight - - # Bloom uses post embedding layer norm - state_dict["embed.ln.w"] = bloom.transformer.word_embeddings_layernorm.weight - state_dict["embed.ln.b"] = bloom.transformer.word_embeddings_layernorm.bias - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = bloom.transformer.h[l].input_layernorm.weight - state_dict[f"blocks.{l}.ln1.b"] = bloom.transformer.h[l].input_layernorm.bias - - W = bloom.transformer.h[l].self_attention.query_key_value.weight - - W_split = W.T.reshape(cfg.d_model, cfg.n_heads, 3, cfg.d_head) - - W_Q, W_K, W_V = W_split[..., 0, :], W_split[..., 1, :], W_split[..., 2, :] - W_Q = einops.rearrange(W_Q, "m n h ->n m h", n=cfg.n_heads) - W_K = einops.rearrange(W_K, "m n h ->n m h", n=cfg.n_heads) - W_V = einops.rearrange(W_V, "m n h ->n m h", n=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - qkv_bias = bloom.transformer.h[l].self_attention.query_key_value.bias - qkv_bias = qkv_bias.reshape(cfg.n_heads, 3, cfg.d_head) - - state_dict[f"blocks.{l}.attn.b_Q"] = qkv_bias[:, 0, :] - state_dict[f"blocks.{l}.attn.b_K"] = qkv_bias[:, 1, :] - state_dict[f"blocks.{l}.attn.b_V"] = qkv_bias[:, 2, :] - - W_O = bloom.transformer.h[l].self_attention.dense.weight.T # 
[1024, 1024] - W_O = einops.rearrange(W_O, "(n h) m->n h m", n=cfg.n_heads) # [n_heads, d_head, d_model] - state_dict[f"blocks.{l}.attn.W_O"] = W_O - state_dict[f"blocks.{l}.attn.b_O"] = bloom.transformer.h[l].self_attention.dense.bias - - state_dict[f"blocks.{l}.ln2.w"] = bloom.transformer.h[l].post_attention_layernorm.weight - state_dict[f"blocks.{l}.ln2.b"] = bloom.transformer.h[l].post_attention_layernorm.bias - - W_in = bloom.transformer.h[l].mlp.dense_h_to_4h.weight.T - state_dict[f"blocks.{l}.mlp.W_in"] = W_in - state_dict[f"blocks.{l}.mlp.b_in"] = bloom.transformer.h[l].mlp.dense_h_to_4h.bias - - W_out = bloom.transformer.h[l].mlp.dense_4h_to_h.weight.T - state_dict[f"blocks.{l}.mlp.W_out"] = W_out - state_dict[f"blocks.{l}.mlp.b_out"] = bloom.transformer.h[l].mlp.dense_4h_to_h.bias - state_dict["unembed.W_U"] = bloom.lm_head.weight.T - - state_dict["ln_final.w"] = bloom.transformer.ln_f.weight - state_dict["ln_final.b"] = bloom.transformer.ln_f.bias - return state_dict - - -def convert_coder_weights(model, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = model.transformer.wte.weight - state_dict["pos_embed.W_pos"] = model.transformer.wpe.weight - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = model.transformer.h[l].ln_1.weight - state_dict[f"blocks.{l}.ln1.b"] = model.transformer.h[l].ln_1.bias - - # In GPT-2, q,k,v are produced by one big linear map, whose output is - # concat([q, k, v]) - W_KV = model.transformer.h[l].attn.kv_attn.weight # [d_model, 2 * d_head] - W_K, W_V = torch.tensor_split(W_KV, 2, dim=1) - W_Q = model.transformer.h[l].attn.q_attn.weight # [d_model, d_model] - W_Q = einops.rearrange(W_Q, "m (i h)->i m h", i=cfg.n_heads) - W_K = einops.repeat(W_K, "m h -> i m h", i=cfg.n_heads) - W_V = einops.repeat(W_V, "m h -> i m h", i=cfg.n_heads) - - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - b_Q = einops.rearrange( - model.transformer.h[l].attn.q_attn.bias, - "(index head)-> index head", - index=cfg.n_heads, - head=cfg.d_head, - ) - b_KV = model.transformer.h[l].attn.kv_attn.bias # [2 * d_head] - b_K, b_V = torch.tensor_split(b_KV, 2, dim=0) - b_K = einops.repeat(b_K, "head -> index head", index=cfg.n_heads) - b_V = einops.repeat(b_V, "head -> index head", index=cfg.n_heads) - state_dict[f"blocks.{l}.attn.b_Q"] = b_Q - state_dict[f"blocks.{l}.attn.b_K"] = b_K - state_dict[f"blocks.{l}.attn.b_V"] = b_V - - W_O = model.transformer.h[l].attn.c_proj.weight - W_O = einops.rearrange(W_O, "(i h) m->i h m", i=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - state_dict[f"blocks.{l}.attn.b_O"] = model.transformer.h[l].attn.c_proj.bias - - state_dict[f"blocks.{l}.ln2.w"] = model.transformer.h[l].ln_2.weight - state_dict[f"blocks.{l}.ln2.b"] = model.transformer.h[l].ln_2.bias - - W_in = model.transformer.h[l].mlp.c_fc.weight - state_dict[f"blocks.{l}.mlp.W_in"] = W_in - state_dict[f"blocks.{l}.mlp.b_in"] = model.transformer.h[l].mlp.c_fc.bias - - W_out = model.transformer.h[l].mlp.c_proj.weight - state_dict[f"blocks.{l}.mlp.W_out"] = W_out - state_dict[f"blocks.{l}.mlp.b_out"] = model.transformer.h[l].mlp.c_proj.bias - state_dict["unembed.W_U"] = model.lm_head.weight.T - - state_dict["ln_final.w"] = model.transformer.ln_f.weight - state_dict["ln_final.b"] = model.transformer.ln_f.bias - return state_dict - - -def convert_phi_weights(phi, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = 
phi.model.embed_tokens.weight - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = phi.model.layers[l].input_layernorm.weight - state_dict[f"blocks.{l}.ln1.b"] = phi.model.layers[l].input_layernorm.bias - - W_Q = phi.model.layers[l].self_attn.q_proj.weight - W_K = phi.model.layers[l].self_attn.k_proj.weight - W_V = phi.model.layers[l].self_attn.v_proj.weight - W_Q = einops.rearrange( - W_Q, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads - ) - W_K = einops.rearrange( - W_K, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads - ) - W_V = einops.rearrange( - W_V, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads - ) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.W_V"] = W_V - - b_Q = phi.model.layers[l].self_attn.q_proj.bias - b_K = phi.model.layers[l].self_attn.k_proj.bias - b_V = phi.model.layers[l].self_attn.v_proj.bias - b_Q = einops.rearrange(b_Q, "(n_head d_head) -> n_head d_head", n_head=cfg.n_heads) - b_K = einops.rearrange(b_K, "(n_head d_head) -> n_head d_head", n_head=cfg.n_heads) - b_V = einops.rearrange(b_V, "(n_head d_head) -> n_head d_head", n_head=cfg.n_heads) - state_dict[f"blocks.{l}.attn.b_Q"] = b_Q - state_dict[f"blocks.{l}.attn.b_K"] = b_K - state_dict[f"blocks.{l}.attn.b_V"] = b_V - - W_O = phi.model.layers[l].self_attn.dense.weight - W_O = einops.rearrange( - W_O, "d_model (n_head d_head) -> n_head d_head d_model", n_head=cfg.n_heads - ) - - state_dict[f"blocks.{l}.attn.W_O"] = W_O - state_dict[f"blocks.{l}.attn.b_O"] = phi.model.layers[l].self_attn.dense.bias - - # Layer Norm 1 and 2 are tied. - state_dict[f"blocks.{l}.ln2.w"] = state_dict[f"blocks.{l}.ln1.w"] - state_dict[f"blocks.{l}.ln2.b"] = state_dict[f"blocks.{l}.ln1.b"] - - state_dict[f"blocks.{l}.mlp.W_in"] = phi.model.layers[l].mlp.fc1.weight.T - state_dict[f"blocks.{l}.mlp.b_in"] = phi.model.layers[l].mlp.fc1.bias - state_dict[f"blocks.{l}.mlp.W_out"] = phi.model.layers[l].mlp.fc2.weight.T - state_dict[f"blocks.{l}.mlp.b_out"] = phi.model.layers[l].mlp.fc2.bias - - state_dict["ln_final.w"] = phi.model.final_layernorm.weight - state_dict["ln_final.b"] = phi.model.final_layernorm.bias - - state_dict["unembed.W_U"] = phi.lm_head.weight.T - state_dict["unembed.b_U"] = phi.lm_head.bias - - return state_dict - - -def convert_phi3_weights(phi, cfg: HookedTransformerConfig): - state_dict = {} - - state_dict["embed.W_E"] = phi.model.embed_tokens.weight - - for l in range(cfg.n_layers): - state_dict[f"blocks.{l}.ln1.w"] = phi.model.layers[l].input_layernorm.weight - state_dict[f"blocks.{l}.ln1.b"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - - W = phi.model.layers[l].self_attn.qkv_proj.weight - W_Q, W_K, W_V = torch.tensor_split(W, 3, dim=0) - W_Q = einops.rearrange( - W_Q, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads - ) - W_K = einops.rearrange( - W_K, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads - ) - W_V = einops.rearrange( - W_V, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads - ) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - state_dict[f"blocks.{l}.attn.W_K"] = W_K - state_dict[f"blocks.{l}.attn.b_K"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - state_dict[f"blocks.{l}.attn.W_V"] = W_V - state_dict[f"blocks.{l}.attn.b_V"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - - W_O = 
phi.model.layers[l].self_attn.o_proj.weight - W_O = einops.rearrange( - W_O, "d_model (n_head d_head) -> n_head d_head d_model", n_head=cfg.n_heads - ) - - state_dict[f"blocks.{l}.attn.W_O"] = W_O - state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - state_dict[f"blocks.{l}.ln2.w"] = phi.model.layers[l].post_attention_layernorm.weight - state_dict[f"blocks.{l}.ln2.b"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - - W = phi.model.layers[l].mlp.gate_up_proj.weight.T - W_gate, W_in = torch.tensor_split(W, 2, dim=1) - state_dict[f"blocks.{l}.mlp.W_in"] = W_in - state_dict[f"blocks.{l}.mlp.W_gate"] = W_gate - state_dict[f"blocks.{l}.mlp.W_out"] = phi.model.layers[l].mlp.down_proj.weight.T - - state_dict["ln_final.w"] = phi.model.norm.weight - - state_dict["unembed.W_U"] = phi.lm_head.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - - return state_dict - - -def convert_gemma_weights(gemma, cfg: HookedTransformerConfig): - state_dict = {} - - assert cfg.n_key_value_heads is not None # keep mypy happy - assert cfg.d_mlp is not None # keep mypy happy - - # Gemma Models scale embeddings by multiplying by sqrt(d_model), use hidden state type to match - # HF implementation - state_dict["embed.W_E"] = gemma.model.embed_tokens.weight * torch.tensor( - cfg.d_model**0.5, dtype=cfg.dtype - ) - - # Gemma has no biases anywhere - for l in range(cfg.n_layers): - # GemmaRMSNorm adds 1 to weights before multiplying by input, keep RMS calcs in float32 - state_dict[f"blocks.{l}.ln1.w"] = gemma.model.layers[ - l - ].input_layernorm.weight.float() + torch.ones_like( - gemma.model.layers[l].input_layernorm.weight, dtype=torch.float32 - ) - - W_Q = gemma.model.layers[l].self_attn.q_proj.weight - W_K = gemma.model.layers[l].self_attn.k_proj.weight - W_V = gemma.model.layers[l].self_attn.v_proj.weight - W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) - W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads) - W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads) - state_dict[f"blocks.{l}.attn.W_Q"] = W_Q - state_dict[f"blocks.{l}.attn._W_K"] = W_K - state_dict[f"blocks.{l}.attn._W_V"] = W_V - - state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) - state_dict[f"blocks.{l}.attn._b_K"] = torch.zeros( - cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype - ) - state_dict[f"blocks.{l}.attn._b_V"] = torch.zeros( - cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype - ) - - W_O = gemma.model.layers[l].self_attn.o_proj.weight - W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) - state_dict[f"blocks.{l}.attn.W_O"] = W_O - - state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - # GemmaRMSNorm adds 1 to weights before multiplying by input, keep RMS calcs in float32 - state_dict[f"blocks.{l}.ln2.w"] = gemma.model.layers[ - l - ].post_attention_layernorm.weight.float() + torch.ones_like( - gemma.model.norm.weight, dtype=torch.float32 - ) - - state_dict[f"blocks.{l}.mlp.W_in"] = gemma.model.layers[l].mlp.up_proj.weight.T - state_dict[f"blocks.{l}.mlp.W_gate"] = gemma.model.layers[l].mlp.gate_proj.weight.T - state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype) - - state_dict[f"blocks.{l}.mlp.W_out"] = gemma.model.layers[l].mlp.down_proj.weight.T - state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) - - # GemmaRMSNorm adds 1 to weights before multiplying by input, keep RMS calcs in float32 - 
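    # (In the HF GemmaRMSNorm the normalized input is multiplied by (1 + weight), so
    # adding ones here converts the stored weight into a standard RMSNorm scale.)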
state_dict["ln_final.w"] = gemma.model.norm.weight.float() + torch.ones_like( - gemma.model.norm.weight, dtype=torch.float32 - ) - - state_dict["unembed.W_U"] = gemma.lm_head.weight.T - state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) - - return state_dict - - @dataclasses.dataclass class Config: d_model: int = 768 diff --git a/transformer_lens/pretrained/__init__.py b/transformer_lens/pretrained/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/transformer_lens/pretrained/weight_conversions/__init__.py b/transformer_lens/pretrained/weight_conversions/__init__.py new file mode 100644 index 0000000..b13850e --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/__init__.py @@ -0,0 +1,20 @@ +from .neo import convert_neo_weights +from .gpt2 import convert_gpt2_weights +from .opt import convert_opt_weights +from .gptj import convert_gptj_weights +from .neox import convert_neox_weights +from .llama import convert_llama_weights +from .bert import convert_bert_weights +from .mistral import convert_mistral_weights +from .mixtral import convert_mixtral_weights +from .bloom import convert_bloom_weights +from .coder import convert_coder_weights +from .qwen import convert_qwen_weights +from .qwen2 import convert_qwen2_weights +from .phi import convert_phi_weights +from .phi3 import convert_phi3_weights +from .gemma import convert_gemma_weights +from .mingpt import convert_mingpt_weights +from .nanogpt import convert_nanogpt_weights +from .t5 import convert_t5_weights +from .neel_solu_old import convert_neel_solu_old_weights diff --git a/transformer_lens/pretrained/weight_conversions/bert.py b/transformer_lens/pretrained/weight_conversions/bert.py new file mode 100644 index 0000000..965a8da --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/bert.py @@ -0,0 +1,65 @@ +import einops + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_bert_weights(bert, cfg: HookedTransformerConfig): + embeddings = bert.bert.embeddings + state_dict = { + "embed.embed.W_E": embeddings.word_embeddings.weight, + "embed.pos_embed.W_pos": embeddings.position_embeddings.weight, + "embed.token_type_embed.W_token_type": embeddings.token_type_embeddings.weight, + "embed.ln.w": embeddings.LayerNorm.weight, + "embed.ln.b": embeddings.LayerNorm.bias, + } + + for l in range(cfg.n_layers): + block = bert.bert.encoder.layer[l] + state_dict[f"blocks.{l}.attn.W_Q"] = einops.rearrange( + block.attention.self.query.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + state_dict[f"blocks.{l}.attn.b_Q"] = einops.rearrange( + block.attention.self.query.bias, "(i h) -> i h", i=cfg.n_heads + ) + state_dict[f"blocks.{l}.attn.W_K"] = einops.rearrange( + block.attention.self.key.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + state_dict[f"blocks.{l}.attn.b_K"] = einops.rearrange( + block.attention.self.key.bias, "(i h) -> i h", i=cfg.n_heads + ) + state_dict[f"blocks.{l}.attn.W_V"] = einops.rearrange( + block.attention.self.value.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + state_dict[f"blocks.{l}.attn.b_V"] = einops.rearrange( + block.attention.self.value.bias, "(i h) -> i h", i=cfg.n_heads + ) + state_dict[f"blocks.{l}.attn.W_O"] = einops.rearrange( + block.attention.output.dense.weight, + "m (i h) -> i h m", + i=cfg.n_heads, + ) + state_dict[f"blocks.{l}.attn.b_O"] = block.attention.output.dense.bias + state_dict[f"blocks.{l}.ln1.w"] = block.attention.output.LayerNorm.weight + state_dict[f"blocks.{l}.ln1.b"] = 
block.attention.output.LayerNorm.bias + state_dict[f"blocks.{l}.mlp.W_in"] = einops.rearrange( + block.intermediate.dense.weight, "mlp model -> model mlp" + ) + state_dict[f"blocks.{l}.mlp.b_in"] = block.intermediate.dense.bias + state_dict[f"blocks.{l}.mlp.W_out"] = einops.rearrange( + block.output.dense.weight, "model mlp -> mlp model" + ) + state_dict[f"blocks.{l}.mlp.b_out"] = block.output.dense.bias + state_dict[f"blocks.{l}.ln2.w"] = block.output.LayerNorm.weight + state_dict[f"blocks.{l}.ln2.b"] = block.output.LayerNorm.bias + + mlm_head = bert.cls.predictions + state_dict["mlm_head.W"] = mlm_head.transform.dense.weight + state_dict["mlm_head.b"] = mlm_head.transform.dense.bias + state_dict["mlm_head.ln.w"] = mlm_head.transform.LayerNorm.weight + state_dict["mlm_head.ln.b"] = mlm_head.transform.LayerNorm.bias + # Note: BERT uses tied embeddings + state_dict["unembed.W_U"] = embeddings.word_embeddings.weight.T + # "unembed.W_U": mlm_head.decoder.weight.T, + state_dict["unembed.b_U"] = mlm_head.bias + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/bloom.py b/transformer_lens/pretrained/weight_conversions/bloom.py new file mode 100644 index 0000000..e52fa90 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/bloom.py @@ -0,0 +1,57 @@ +import einops + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_bloom_weights(bloom, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = bloom.transformer.word_embeddings.weight + + # Bloom uses post embedding layer norm + state_dict["embed.ln.w"] = bloom.transformer.word_embeddings_layernorm.weight + state_dict["embed.ln.b"] = bloom.transformer.word_embeddings_layernorm.bias + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = bloom.transformer.h[l].input_layernorm.weight + state_dict[f"blocks.{l}.ln1.b"] = bloom.transformer.h[l].input_layernorm.bias + + W = bloom.transformer.h[l].self_attention.query_key_value.weight + + W_split = W.T.reshape(cfg.d_model, cfg.n_heads, 3, cfg.d_head) + + W_Q, W_K, W_V = W_split[..., 0, :], W_split[..., 1, :], W_split[..., 2, :] + W_Q = einops.rearrange(W_Q, "m n h ->n m h", n=cfg.n_heads) + W_K = einops.rearrange(W_K, "m n h ->n m h", n=cfg.n_heads) + W_V = einops.rearrange(W_V, "m n h ->n m h", n=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.W_V"] = W_V + + qkv_bias = bloom.transformer.h[l].self_attention.query_key_value.bias + qkv_bias = qkv_bias.reshape(cfg.n_heads, 3, cfg.d_head) + + state_dict[f"blocks.{l}.attn.b_Q"] = qkv_bias[:, 0, :] + state_dict[f"blocks.{l}.attn.b_K"] = qkv_bias[:, 1, :] + state_dict[f"blocks.{l}.attn.b_V"] = qkv_bias[:, 2, :] + + W_O = bloom.transformer.h[l].self_attention.dense.weight.T # [1024, 1024] + W_O = einops.rearrange(W_O, "(n h) m->n h m", n=cfg.n_heads) # [n_heads, d_head, d_model] + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = bloom.transformer.h[l].self_attention.dense.bias + + state_dict[f"blocks.{l}.ln2.w"] = bloom.transformer.h[l].post_attention_layernorm.weight + state_dict[f"blocks.{l}.ln2.b"] = bloom.transformer.h[l].post_attention_layernorm.bias + + W_in = bloom.transformer.h[l].mlp.dense_h_to_4h.weight.T + state_dict[f"blocks.{l}.mlp.W_in"] = W_in + state_dict[f"blocks.{l}.mlp.b_in"] = bloom.transformer.h[l].mlp.dense_h_to_4h.bias + + W_out = bloom.transformer.h[l].mlp.dense_4h_to_h.weight.T + 
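        # dense_4h_to_h is an nn.Linear with weight shape [d_model, d_mlp]; the transpose
        # above puts it into HookedTransformer's [d_mlp, d_model] convention for W_out.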
state_dict[f"blocks.{l}.mlp.W_out"] = W_out + state_dict[f"blocks.{l}.mlp.b_out"] = bloom.transformer.h[l].mlp.dense_4h_to_h.bias + state_dict["unembed.W_U"] = bloom.lm_head.weight.T + + state_dict["ln_final.w"] = bloom.transformer.ln_f.weight + state_dict["ln_final.b"] = bloom.transformer.ln_f.bias + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/coder.py b/transformer_lens/pretrained/weight_conversions/coder.py new file mode 100644 index 0000000..5b51161 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/coder.py @@ -0,0 +1,63 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_coder_weights(model, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = model.transformer.wte.weight + state_dict["pos_embed.W_pos"] = model.transformer.wpe.weight + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = model.transformer.h[l].ln_1.weight + state_dict[f"blocks.{l}.ln1.b"] = model.transformer.h[l].ln_1.bias + + # In GPT-2, q,k,v are produced by one big linear map, whose output is + # concat([q, k, v]) + W_KV = model.transformer.h[l].attn.kv_attn.weight # [d_model, 2 * d_head] + W_K, W_V = torch.tensor_split(W_KV, 2, dim=1) + W_Q = model.transformer.h[l].attn.q_attn.weight # [d_model, d_model] + W_Q = einops.rearrange(W_Q, "m (i h)->i m h", i=cfg.n_heads) + W_K = einops.repeat(W_K, "m h -> i m h", i=cfg.n_heads) + W_V = einops.repeat(W_V, "m h -> i m h", i=cfg.n_heads) + + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.W_V"] = W_V + + b_Q = einops.rearrange( + model.transformer.h[l].attn.q_attn.bias, + "(index head)-> index head", + index=cfg.n_heads, + head=cfg.d_head, + ) + b_KV = model.transformer.h[l].attn.kv_attn.bias # [2 * d_head] + b_K, b_V = torch.tensor_split(b_KV, 2, dim=0) + b_K = einops.repeat(b_K, "head -> index head", index=cfg.n_heads) + b_V = einops.repeat(b_V, "head -> index head", index=cfg.n_heads) + state_dict[f"blocks.{l}.attn.b_Q"] = b_Q + state_dict[f"blocks.{l}.attn.b_K"] = b_K + state_dict[f"blocks.{l}.attn.b_V"] = b_V + + W_O = model.transformer.h[l].attn.c_proj.weight + W_O = einops.rearrange(W_O, "(i h) m->i h m", i=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = model.transformer.h[l].attn.c_proj.bias + + state_dict[f"blocks.{l}.ln2.w"] = model.transformer.h[l].ln_2.weight + state_dict[f"blocks.{l}.ln2.b"] = model.transformer.h[l].ln_2.bias + + W_in = model.transformer.h[l].mlp.c_fc.weight + state_dict[f"blocks.{l}.mlp.W_in"] = W_in + state_dict[f"blocks.{l}.mlp.b_in"] = model.transformer.h[l].mlp.c_fc.bias + + W_out = model.transformer.h[l].mlp.c_proj.weight + state_dict[f"blocks.{l}.mlp.W_out"] = W_out + state_dict[f"blocks.{l}.mlp.b_out"] = model.transformer.h[l].mlp.c_proj.bias + state_dict["unembed.W_U"] = model.lm_head.weight.T + + state_dict["ln_final.w"] = model.transformer.ln_f.weight + state_dict["ln_final.b"] = model.transformer.ln_f.bias + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/gemma.py b/transformer_lens/pretrained/weight_conversions/gemma.py new file mode 100644 index 0000000..0c46bea --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/gemma.py @@ -0,0 +1,95 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_gemma_weights(gemma, cfg: 
HookedTransformerConfig): + state_dict = {} + + assert cfg.n_key_value_heads is not None # keep mypy happy + assert cfg.d_mlp is not None # keep mypy happy + + # Gemma Models scale embeddings by multiplying by sqrt(d_model), use hidden state type to match + # HF implementation + state_dict["embed.W_E"] = gemma.model.embed_tokens.weight * torch.tensor( + cfg.d_model**0.5, dtype=cfg.dtype + ) + + # Gemma has no biases anywhere + for l in range(cfg.n_layers): + # GemmaRMSNorm adds 1 to weights before multiplying by input, keep RMS calcs in float32 + state_dict[f"blocks.{l}.ln1.w"] = gemma.model.layers[ + l + ].input_layernorm.weight.float() + torch.ones_like( + gemma.model.layers[l].input_layernorm.weight, dtype=torch.float32 + ) + if cfg.use_normalization_before_and_after: + # Only applies for Gemma 2 + state_dict[f"blocks.{l}.ln1_post.w"] = gemma.model.layers[ + l + ].post_attention_layernorm.weight.float() + torch.ones_like( + gemma.model.layers[l].input_layernorm.weight, dtype=torch.float32 + ) + + W_Q = gemma.model.layers[l].self_attn.q_proj.weight + W_K = gemma.model.layers[l].self_attn.k_proj.weight + W_V = gemma.model.layers[l].self_attn.v_proj.weight + W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) + W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads) + W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads) + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn._W_K"] = W_K + state_dict[f"blocks.{l}.attn._W_V"] = W_V + + state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + state_dict[f"blocks.{l}.attn._b_K"] = torch.zeros( + cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype + ) + state_dict[f"blocks.{l}.attn._b_V"] = torch.zeros( + cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype + ) + + W_O = gemma.model.layers[l].self_attn.o_proj.weight + W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + + state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + # GemmaRMSNorm adds 1 to weights before multiplying by input, keep RMS calcs in float32 + if not cfg.use_normalization_before_and_after: + # Only applies for Gemma 1. 
Confusingly post_attention_layernorm is applied to mlp_input in Gemma 1 and attn_out in Gemma 2 + state_dict[f"blocks.{l}.ln2.w"] = gemma.model.layers[ + l + ].post_attention_layernorm.weight.float() + torch.ones_like( + gemma.model.norm.weight, dtype=torch.float32 + ) + else: + # Only applies for Gemma 2 + state_dict[f"blocks.{l}.ln2.w"] = gemma.model.layers[ + l + ].pre_feedforward_layernorm.weight.float() + torch.ones_like( + gemma.model.layers[l].pre_feedforward_layernorm.weight, dtype=torch.float32 + ) + state_dict[f"blocks.{l}.ln2_post.w"] = gemma.model.layers[ + l + ].post_feedforward_layernorm.weight.float() + torch.ones_like( + gemma.model.layers[l].post_feedforward_layernorm.weight, dtype=torch.float32 + ) + + state_dict[f"blocks.{l}.mlp.W_in"] = gemma.model.layers[l].mlp.up_proj.weight.T + state_dict[f"blocks.{l}.mlp.W_gate"] = gemma.model.layers[l].mlp.gate_proj.weight.T + state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype) + + state_dict[f"blocks.{l}.mlp.W_out"] = gemma.model.layers[l].mlp.down_proj.weight.T + state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + # GemmaRMSNorm adds 1 to weights before multiplying by input, keep RMS calcs in float32 + state_dict["ln_final.w"] = gemma.model.norm.weight.float() + torch.ones_like( + gemma.model.norm.weight, dtype=torch.float32 + ) + + state_dict["unembed.W_U"] = gemma.lm_head.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/gpt2.py b/transformer_lens/pretrained/weight_conversions/gpt2.py new file mode 100644 index 0000000..af9b8e7 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/gpt2.py @@ -0,0 +1,60 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_gpt2_weights(gpt2, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = gpt2.transformer.wte.weight + state_dict["pos_embed.W_pos"] = gpt2.transformer.wpe.weight + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = gpt2.transformer.h[l].ln_1.weight + state_dict[f"blocks.{l}.ln1.b"] = gpt2.transformer.h[l].ln_1.bias + + # In GPT-2, q,k,v are produced by one big linear map, whose output is + # concat([q, k, v]) + W = gpt2.transformer.h[l].attn.c_attn.weight + W_Q, W_K, W_V = torch.tensor_split(W, 3, dim=1) + W_Q = einops.rearrange(W_Q, "m (i h)->i m h", i=cfg.n_heads) + W_K = einops.rearrange(W_K, "m (i h)->i m h", i=cfg.n_heads) + W_V = einops.rearrange(W_V, "m (i h)->i m h", i=cfg.n_heads) + + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.W_V"] = W_V + + qkv_bias = gpt2.transformer.h[l].attn.c_attn.bias + qkv_bias = einops.rearrange( + qkv_bias, + "(qkv index head)->qkv index head", + qkv=3, + index=cfg.n_heads, + head=cfg.d_head, + ) + state_dict[f"blocks.{l}.attn.b_Q"] = qkv_bias[0] + state_dict[f"blocks.{l}.attn.b_K"] = qkv_bias[1] + state_dict[f"blocks.{l}.attn.b_V"] = qkv_bias[2] + + W_O = gpt2.transformer.h[l].attn.c_proj.weight + W_O = einops.rearrange(W_O, "(i h) m->i h m", i=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = gpt2.transformer.h[l].attn.c_proj.bias + + state_dict[f"blocks.{l}.ln2.w"] = gpt2.transformer.h[l].ln_2.weight + state_dict[f"blocks.{l}.ln2.b"] = gpt2.transformer.h[l].ln_2.bias + + W_in = gpt2.transformer.h[l].mlp.c_fc.weight + 
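        # HF GPT-2 stores its MLP weights in Conv1D modules that are already laid out as
        # [d_model, d_mlp], so no transpose is needed here, unlike the nn.Linear based
        # conversions elsewhere in this package.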
state_dict[f"blocks.{l}.mlp.W_in"] = W_in + state_dict[f"blocks.{l}.mlp.b_in"] = gpt2.transformer.h[l].mlp.c_fc.bias + + W_out = gpt2.transformer.h[l].mlp.c_proj.weight + state_dict[f"blocks.{l}.mlp.W_out"] = W_out + state_dict[f"blocks.{l}.mlp.b_out"] = gpt2.transformer.h[l].mlp.c_proj.bias + state_dict["unembed.W_U"] = gpt2.lm_head.weight.T + + state_dict["ln_final.w"] = gpt2.transformer.ln_f.weight + state_dict["ln_final.b"] = gpt2.transformer.ln_f.bias + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/gptj.py b/transformer_lens/pretrained/weight_conversions/gptj.py new file mode 100644 index 0000000..f01f779 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/gptj.py @@ -0,0 +1,50 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_gptj_weights(gptj, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = gptj.transformer.wte.weight + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = gptj.transformer.h[l].ln_1.weight + state_dict[f"blocks.{l}.ln1.b"] = gptj.transformer.h[l].ln_1.bias + + W_Q = gptj.transformer.h[l].attn.q_proj.weight + W_K = gptj.transformer.h[l].attn.k_proj.weight + W_V = gptj.transformer.h[l].attn.v_proj.weight + W_Q = einops.rearrange(W_Q, "(i h) m->i m h", i=cfg.n_heads) + W_K = einops.rearrange(W_K, "(i h) m->i m h", i=cfg.n_heads) + W_V = einops.rearrange(W_V, "(i h) m->i m h", i=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.W_V"] = W_V + + state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + state_dict[f"blocks.{l}.attn.b_K"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + state_dict[f"blocks.{l}.attn.b_V"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + + W_O = gptj.transformer.h[l].attn.out_proj.weight + W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + # Layer Norm 1 and 2 are tied. + state_dict[f"blocks.{l}.ln2.w"] = state_dict[f"blocks.{l}.ln1.w"] + state_dict[f"blocks.{l}.ln2.b"] = state_dict[f"blocks.{l}.ln1.b"] + + state_dict[f"blocks.{l}.mlp.W_in"] = gptj.transformer.h[l].mlp.fc_in.weight.T + state_dict[f"blocks.{l}.mlp.b_in"] = gptj.transformer.h[l].mlp.fc_in.bias + + state_dict[f"blocks.{l}.mlp.W_out"] = gptj.transformer.h[l].mlp.fc_out.weight.T + state_dict[f"blocks.{l}.mlp.b_out"] = gptj.transformer.h[l].mlp.fc_out.bias + state_dict["ln_final.w"] = gptj.transformer.ln_f.weight + state_dict["ln_final.b"] = gptj.transformer.ln_f.bias + + state_dict["unembed.W_U"] = gptj.lm_head.weight.T + # Contains a bias, for some reason? 
+ state_dict["unembed.b_U"] = gptj.lm_head.bias + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/llama.py b/transformer_lens/pretrained/weight_conversions/llama.py new file mode 100644 index 0000000..d837c25 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/llama.py @@ -0,0 +1,96 @@ +from typing import cast + +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_llama_weights(llama, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = llama.model.embed_tokens.weight + + # Some models with the Llama architecture use Grouped Query Attention, and so for these we need to modify + # the state dict keys for the K/V attention weight/biases, prepending "_" to the key names. + using_gqa = cfg.n_key_value_heads is not None + gqa_uscore = "_" if using_gqa else "" + # need a cast since MyPy isn't smart enough to realize that using_gqa implies n_key_value_heads is not None + n_kv_heads = cast(int, cfg.n_key_value_heads if using_gqa else cfg.n_heads) + + # llama has no biases anywhere and deals with everything else roughly like + # GPTNeoX with different names + + assert cfg.d_mlp is not None # keep mypy happy + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = llama.model.layers[l].input_layernorm.weight + + W_Q = llama.model.layers[l].self_attn.q_proj.weight + W_K = llama.model.layers[l].self_attn.k_proj.weight + W_V = llama.model.layers[l].self_attn.v_proj.weight + + # in case of quantization, + # parameters should stay as bitsandbytes.nn.modules.Params4bit + if not cfg.load_in_4bit: + W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) + W_K = einops.rearrange(W_K, "(n h) m->n m h", n=n_kv_heads) + W_V = einops.rearrange(W_V, "(n h) m->n m h", n=n_kv_heads) + + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.{gqa_uscore}W_K"] = W_K + state_dict[f"blocks.{l}.attn.{gqa_uscore}W_V"] = W_V + + state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros( + cfg.n_heads, cfg.d_head, dtype=cfg.dtype, device=cfg.device + ) + state_dict[f"blocks.{l}.attn.{gqa_uscore}b_K"] = torch.zeros( + n_kv_heads, + cfg.d_head, + dtype=cfg.dtype, + device=cfg.device, + ) + state_dict[f"blocks.{l}.attn.{gqa_uscore}b_V"] = torch.zeros( + n_kv_heads, + cfg.d_head, + dtype=cfg.dtype, + device=cfg.device, + ) + + W_O = llama.model.layers[l].self_attn.o_proj.weight + + if not cfg.load_in_4bit: + W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) + + state_dict[f"blocks.{l}.attn.W_O"] = W_O.to(device=cfg.device) + + state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros( + cfg.d_model, dtype=cfg.dtype, device=cfg.device + ) + + state_dict[f"blocks.{l}.ln2.w"] = llama.model.layers[l].post_attention_layernorm.weight + + # in case of quantization, + # parameters should stay as bitsandbytes.nn.modules.Params4bit + if not cfg.load_in_4bit: + state_dict[f"blocks.{l}.mlp.W_in"] = llama.model.layers[l].mlp.up_proj.weight.T + state_dict[f"blocks.{l}.mlp.W_gate"] = llama.model.layers[l].mlp.gate_proj.weight.T + state_dict[f"blocks.{l}.mlp.W_out"] = llama.model.layers[l].mlp.down_proj.weight.T + else: + state_dict[f"blocks.{l}.mlp.W_in"] = llama.model.layers[l].mlp.up_proj.weight + state_dict[f"blocks.{l}.mlp.W_gate"] = llama.model.layers[l].mlp.gate_proj.weight + state_dict[f"blocks.{l}.mlp.W_out"] = llama.model.layers[l].mlp.down_proj.weight + + state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros( + cfg.d_mlp, dtype=cfg.dtype, device=cfg.device + ) + 
state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros( + cfg.d_model, dtype=cfg.dtype, device=cfg.device + ) + + state_dict["ln_final.w"] = llama.model.norm.weight + + state_dict["unembed.W_U"] = llama.lm_head.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype, device=cfg.device) + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/mingpt.py b/transformer_lens/pretrained/weight_conversions/mingpt.py new file mode 100644 index 0000000..84b2c17 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/mingpt.py @@ -0,0 +1,63 @@ +import einops + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_mingpt_weights(old_state_dict, cfg: HookedTransformerConfig): + # mingpt (https://github.com/karpathy/minGPT) is mostly similar to GPT-2, + # but doesn't concat the QKV matrices. + state_dict = {} + + state_dict["embed.W_E"] = old_state_dict["tok_emb.weight"] + state_dict["pos_embed.W_pos"] = old_state_dict["pos_emb"].squeeze() + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = old_state_dict[f"blocks.{l}.ln1.weight"] + state_dict[f"blocks.{l}.ln1.b"] = old_state_dict[f"blocks.{l}.ln1.bias"] + + W_Q = old_state_dict[f"blocks.{l}.attn.query.weight"] + W_K = old_state_dict[f"blocks.{l}.attn.key.weight"] + W_V = old_state_dict[f"blocks.{l}.attn.value.weight"] + W_Q = einops.rearrange(W_Q, "(i h) m->i m h", i=cfg.n_heads) + W_K = einops.rearrange(W_K, "(i h) m->i m h", i=cfg.n_heads) + W_V = einops.rearrange(W_V, "(i h) m->i m h", i=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.W_V"] = W_V + + q_bias = einops.rearrange( + old_state_dict[f"blocks.{l}.attn.query.bias"], "(i h)->i h", i=cfg.n_heads + ) + k_bias = einops.rearrange( + old_state_dict[f"blocks.{l}.attn.key.bias"], "(i h)->i h", i=cfg.n_heads + ) + v_bias = einops.rearrange( + old_state_dict[f"blocks.{l}.attn.value.bias"], "(i h)->i h", i=cfg.n_heads + ) + + state_dict[f"blocks.{l}.attn.b_Q"] = q_bias + state_dict[f"blocks.{l}.attn.b_K"] = k_bias + state_dict[f"blocks.{l}.attn.b_V"] = v_bias + + W_O = old_state_dict[f"blocks.{l}.attn.proj.weight"] + W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = old_state_dict[f"blocks.{l}.attn.proj.bias"] + + state_dict[f"blocks.{l}.ln2.w"] = old_state_dict[f"blocks.{l}.ln2.weight"] + state_dict[f"blocks.{l}.ln2.b"] = old_state_dict[f"blocks.{l}.ln2.bias"] + + W_in = old_state_dict[f"blocks.{l}.mlp.0.weight"] + state_dict[f"blocks.{l}.mlp.W_in"] = W_in.T + state_dict[f"blocks.{l}.mlp.b_in"] = old_state_dict[f"blocks.{l}.mlp.0.bias"] + + W_out = old_state_dict[f"blocks.{l}.mlp.2.weight"] + state_dict[f"blocks.{l}.mlp.W_out"] = W_out.T + state_dict[f"blocks.{l}.mlp.b_out"] = old_state_dict[f"blocks.{l}.mlp.2.bias"] + + state_dict["unembed.W_U"] = old_state_dict["head.weight"].T + + state_dict["ln_final.w"] = old_state_dict["ln_f.weight"] + state_dict["ln_final.b"] = old_state_dict["ln_f.bias"] + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/mistral.py b/transformer_lens/pretrained/weight_conversions/mistral.py new file mode 100644 index 0000000..5dce50a --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/mistral.py @@ -0,0 +1,57 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def 
convert_mistral_weights(mistral, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = mistral.model.embed_tokens.weight + + assert cfg.n_key_value_heads is not None # keep mypy happy + assert cfg.d_mlp is not None # keep mypy happy + + # Mistral has no biases anywhere + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = mistral.model.layers[l].input_layernorm.weight + + W_Q = mistral.model.layers[l].self_attn.q_proj.weight + W_K = mistral.model.layers[l].self_attn.k_proj.weight + W_V = mistral.model.layers[l].self_attn.v_proj.weight + W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) + W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads) + W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads) + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn._W_K"] = W_K + state_dict[f"blocks.{l}.attn._W_V"] = W_V + + state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + state_dict[f"blocks.{l}.attn._b_K"] = torch.zeros( + cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype + ) + state_dict[f"blocks.{l}.attn._b_V"] = torch.zeros( + cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype + ) + + W_O = mistral.model.layers[l].self_attn.o_proj.weight + W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + + state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + state_dict[f"blocks.{l}.ln2.w"] = mistral.model.layers[l].post_attention_layernorm.weight + + state_dict[f"blocks.{l}.mlp.W_in"] = mistral.model.layers[l].mlp.up_proj.weight.T + state_dict[f"blocks.{l}.mlp.W_gate"] = mistral.model.layers[l].mlp.gate_proj.weight.T + state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype) + + state_dict[f"blocks.{l}.mlp.W_out"] = mistral.model.layers[l].mlp.down_proj.weight.T + state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + state_dict["ln_final.w"] = mistral.model.norm.weight + + state_dict["unembed.W_U"] = mistral.lm_head.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/mixtral.py b/transformer_lens/pretrained/weight_conversions/mixtral.py new file mode 100644 index 0000000..bf46dad --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/mixtral.py @@ -0,0 +1,73 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_mixtral_weights(mixtral, cfg: HookedTransformerConfig): + # The same as Mistral, but with the MLP replaced with MoE + # As with Mistral, Mixtral has no biases + + state_dict = {} + + assert cfg.n_key_value_heads is not None # keep mypy happy + assert cfg.d_mlp is not None + assert cfg.num_experts is not None + + state_dict["embed.W_E"] = mixtral.model.embed_tokens.weight + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = mixtral.model.layers[l].input_layernorm.weight + + W_Q = mixtral.model.layers[l].self_attn.q_proj.weight + W_K = mixtral.model.layers[l].self_attn.k_proj.weight + W_V = mixtral.model.layers[l].self_attn.v_proj.weight + W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) + W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads) + W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads) + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn._W_K"] 
= W_K + state_dict[f"blocks.{l}.attn._W_V"] = W_V + + state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + state_dict[f"blocks.{l}.attn._b_K"] = torch.zeros( + cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype + ) + state_dict[f"blocks.{l}.attn._b_V"] = torch.zeros( + cfg.n_key_value_heads, cfg.d_head, dtype=cfg.dtype + ) + + W_O = mixtral.model.layers[l].self_attn.o_proj.weight + W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + + state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + state_dict[f"blocks.{l}.ln2.w"] = mixtral.model.layers[l].post_attention_layernorm.weight + + state_dict[f"blocks.{l}.mlp.W_gate.weight"] = mixtral.model.layers[ + l + ].block_sparse_moe.gate.weight + + # The mapping here from wn to W_{in/out/gate} is a bit confusing: + # w1 -> W_gate + # w2 -> W_out + # w3 -> W_in + # See https://github.com/mistralai/mistral-inference/blob/8598cf582091a596671be31990448e0620017851/mistral/model.py#L128 for reference + for e in range(cfg.num_experts): + state_dict[f"blocks.{l}.mlp.experts.{e}.W_in.weight"] = ( + mixtral.model.layers[l].block_sparse_moe.experts[e].w3.weight + ) + state_dict[f"blocks.{l}.mlp.experts.{e}.W_gate.weight"] = ( + mixtral.model.layers[l].block_sparse_moe.experts[e].w1.weight + ) + state_dict[f"blocks.{l}.mlp.experts.{e}.W_out.weight"] = ( + mixtral.model.layers[l].block_sparse_moe.experts[e].w2.weight + ) + + state_dict["ln_final.w"] = mixtral.model.norm.weight.data + + state_dict["unembed.W_U"] = mixtral.lm_head.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/nanogpt.py b/transformer_lens/pretrained/weight_conversions/nanogpt.py new file mode 100644 index 0000000..97adfd9 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/nanogpt.py @@ -0,0 +1,88 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_nanogpt_weights(old_state_dict, cfg: HookedTransformerConfig): + """For https://github.com/karpathy/nanoGPT + There are two complications with converting nanogpt models: + The first is that some state dicts have an unwanted prefix on keys that needs to be removed. + The second is that the models can be saved with or without bias. By default, there + is no bias. This function can handle both cases.""" + # Nanogpt models saved after torch.compile() have this unwanted prefix + # This is a simple way to remove it + unwanted_prefix = "_orig_mod." 
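    # For example, a compiled checkpoint key such as "_orig_mod.transformer.wte.weight"
    # is renamed in place by the loop below to "transformer.wte.weight", so the rest of
    # the conversion can index the state dict uniformly.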
+ for k, v in list(old_state_dict.items()): + if k.startswith(unwanted_prefix): + old_state_dict[k[len(unwanted_prefix) :]] = old_state_dict.pop(k) + + new_state_dict = {} + new_state_dict["pos_embed.W_pos"] = old_state_dict["transformer.wpe.weight"] + new_state_dict["embed.W_E"] = old_state_dict["transformer.wte.weight"] + + new_state_dict["ln_final.w"] = old_state_dict["transformer.ln_f.weight"] + new_state_dict["ln_final.b"] = torch.zeros_like(old_state_dict["transformer.ln_f.weight"]) + new_state_dict["unembed.W_U"] = old_state_dict["lm_head.weight"].T + + bias = False + if "transformer.ln_f.bias" in old_state_dict: + bias = True + new_state_dict["ln_final.b"] = old_state_dict["transformer.ln_f.bias"] + + for layer in range(cfg.n_layers): + layer_key = f"transformer.h.{layer}" + + new_state_dict[f"blocks.{layer}.ln1.w"] = old_state_dict[f"{layer_key}.ln_1.weight"] + # A bias of zeros is required for folding layer norm + new_state_dict[f"blocks.{layer}.ln1.b"] = torch.zeros_like( + old_state_dict[f"{layer_key}.ln_1.weight"] + ) + new_state_dict[f"blocks.{layer}.ln2.w"] = old_state_dict[f"{layer_key}.ln_2.weight"] + new_state_dict[f"blocks.{layer}.ln2.b"] = torch.zeros_like( + old_state_dict[f"{layer_key}.ln_2.weight"] + ) + + W = old_state_dict[f"{layer_key}.attn.c_attn.weight"] + W_Q, W_K, W_V = torch.tensor_split(W, 3, dim=0) + W_Q = einops.rearrange(W_Q, "(i h) m->i m h", i=cfg.n_heads) + W_K = einops.rearrange(W_K, "(i h) m->i m h", i=cfg.n_heads) + W_V = einops.rearrange(W_V, "(i h) m->i m h", i=cfg.n_heads) + new_state_dict[f"blocks.{layer}.attn.W_Q"] = W_Q + new_state_dict[f"blocks.{layer}.attn.W_K"] = W_K + new_state_dict[f"blocks.{layer}.attn.W_V"] = W_V + + W_O = old_state_dict[f"{layer_key}.attn.c_proj.weight"] + W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) + new_state_dict[f"blocks.{layer}.attn.W_O"] = W_O + + new_state_dict[f"blocks.{layer}.mlp.W_in"] = old_state_dict[ + f"{layer_key}.mlp.c_fc.weight" + ].T + new_state_dict[f"blocks.{layer}.mlp.W_out"] = old_state_dict[ + f"{layer_key}.mlp.c_proj.weight" + ].T + + if bias: + new_state_dict[f"blocks.{layer}.ln1.b"] = old_state_dict[f"{layer_key}.ln_1.bias"] + new_state_dict[f"blocks.{layer}.ln2.b"] = old_state_dict[f"{layer_key}.ln_2.bias"] + new_state_dict[f"blocks.{layer}.mlp.b_in"] = old_state_dict[ + f"{layer_key}.mlp.c_fc.bias" + ] + new_state_dict[f"blocks.{layer}.mlp.b_out"] = old_state_dict[ + f"{layer_key}.mlp.c_proj.bias" + ] + + B = old_state_dict[f"{layer_key}.attn.c_attn.bias"] + B_Q, B_K, B_V = torch.tensor_split(B, 3, dim=0) + B_Q = einops.rearrange(B_Q, "(i h)->i h", i=cfg.n_heads) + B_K = einops.rearrange(B_K, "(i h)->i h", i=cfg.n_heads) + B_V = einops.rearrange(B_V, "(i h)->i h", i=cfg.n_heads) + new_state_dict[f"blocks.{layer}.attn.b_Q"] = B_Q + new_state_dict[f"blocks.{layer}.attn.b_K"] = B_K + new_state_dict[f"blocks.{layer}.attn.b_V"] = B_V + new_state_dict[f"blocks.{layer}.attn.b_O"] = old_state_dict[ + f"{layer_key}.attn.c_proj.bias" + ] + + return new_state_dict diff --git a/transformer_lens/pretrained/weight_conversions/neel_solu_old.py b/transformer_lens/pretrained/weight_conversions/neel_solu_old.py new file mode 100644 index 0000000..d7089c5 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/neel_solu_old.py @@ -0,0 +1,38 @@ +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_neel_solu_old_weights(state_dict: dict, cfg: HookedTransformerConfig): + """ + Converts the weights of my old SoLU models to the HookedTransformer 
format. + Takes as input a state dict, *not* a model object. + + There are a bunch of dumb bugs in the original code, sorry! + + Models 1L, 2L, 4L and 6L have left facing weights (ie, weights have shape + [dim_out, dim_in]) while HookedTransformer does right facing (ie [dim_in, + dim_out]). + + 8L has *just* a left facing W_pos, the rest right facing. + + And some models were trained with + """ + # Early models have left facing W_pos + reverse_pos = cfg.n_layers <= 8 + + # Models prior to 8L have left facing everything (8L has JUST left facing W_pos - sorry! Stupid bug) + reverse_weights = cfg.n_layers <= 6 + + new_state_dict = {} + for k, v in state_dict.items(): + k = k.replace("norm", "ln") + if k.startswith("ln."): + k = k.replace("ln.", "ln_final.") + new_state_dict[k] = v + + if reverse_pos: + new_state_dict["pos_embed.W_pos"] = new_state_dict["pos_embed.W_pos"].T + if reverse_weights: + for k, v in new_state_dict.items(): + if "W_" in k and "W_pos" not in k: + new_state_dict[k] = v.transpose(-2, -1) + return new_state_dict diff --git a/transformer_lens/pretrained/weight_conversions/neo.py b/transformer_lens/pretrained/weight_conversions/neo.py new file mode 100644 index 0000000..0f9432e --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/neo.py @@ -0,0 +1,49 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_neo_weights(neo, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = neo.transformer.wte.weight + state_dict["pos_embed.W_pos"] = neo.transformer.wpe.weight + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = neo.transformer.h[l].ln_1.weight + state_dict[f"blocks.{l}.ln1.b"] = neo.transformer.h[l].ln_1.bias + + W_Q = neo.transformer.h[l].attn.attention.q_proj.weight + W_K = neo.transformer.h[l].attn.attention.k_proj.weight + W_V = neo.transformer.h[l].attn.attention.v_proj.weight + W_Q = einops.rearrange(W_Q, "(i h) m->i m h", i=cfg.n_heads) + W_K = einops.rearrange(W_K, "(i h) m->i m h", i=cfg.n_heads) + W_V = einops.rearrange(W_V, "(i h) m->i m h", i=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.W_V"] = W_V + + state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + state_dict[f"blocks.{l}.attn.b_K"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + state_dict[f"blocks.{l}.attn.b_V"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + + W_O = neo.transformer.h[l].attn.attention.out_proj.weight + W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = neo.transformer.h[l].attn.attention.out_proj.bias + + state_dict[f"blocks.{l}.ln2.w"] = neo.transformer.h[l].ln_2.weight + state_dict[f"blocks.{l}.ln2.b"] = neo.transformer.h[l].ln_2.bias + + state_dict[f"blocks.{l}.mlp.W_in"] = neo.transformer.h[l].mlp.c_fc.weight.T + state_dict[f"blocks.{l}.mlp.b_in"] = neo.transformer.h[l].mlp.c_fc.bias + + state_dict[f"blocks.{l}.mlp.W_out"] = neo.transformer.h[l].mlp.c_proj.weight.T + state_dict[f"blocks.{l}.mlp.b_out"] = neo.transformer.h[l].mlp.c_proj.bias + state_dict["ln_final.w"] = neo.transformer.ln_f.weight + state_dict["ln_final.b"] = neo.transformer.ln_f.bias + + state_dict["unembed.W_U"] = neo.lm_head.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + return state_dict diff --git 
a/transformer_lens/pretrained/weight_conversions/neox.py b/transformer_lens/pretrained/weight_conversions/neox.py new file mode 100644 index 0000000..0238266 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/neox.py @@ -0,0 +1,59 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_neox_weights(neox, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = neox.gpt_neox.embed_in.weight + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = neox.gpt_neox.layers[l].input_layernorm.weight + state_dict[f"blocks.{l}.ln1.b"] = neox.gpt_neox.layers[l].input_layernorm.bias + + # For some inexplicable reason, NeoX both uses the concatenated QKV + # matmul of GPT-2 (afaict this has a neglible performance impact) AND + # has the flattened axis in the DIFFERENT order of (head_index qkv + # d_head) - this took me an hour to debug... + W = neox.gpt_neox.layers[l].attention.query_key_value.weight + W = einops.rearrange(W, "(i qkv h) m->qkv i m h", i=cfg.n_heads, qkv=3) + + # Fold in layer norm weights + state_dict[f"blocks.{l}.attn.W_Q"] = W[0] + state_dict[f"blocks.{l}.attn.W_K"] = W[1] + state_dict[f"blocks.{l}.attn.W_V"] = W[2] + + qkv_bias = neox.gpt_neox.layers[l].attention.query_key_value.bias + qkv_bias = einops.rearrange( + qkv_bias, + "(index qkv head)->qkv index head", + qkv=3, + index=cfg.n_heads, + head=cfg.d_head, + ) + # Fold in layer norm biases + state_dict[f"blocks.{l}.attn.b_Q"] = qkv_bias[0] + state_dict[f"blocks.{l}.attn.b_K"] = qkv_bias[1] + state_dict[f"blocks.{l}.attn.b_V"] = qkv_bias[2] + + W_O = neox.gpt_neox.layers[l].attention.dense.weight + W_O = einops.rearrange(W_O, "m (i h)->i h m", i=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = neox.gpt_neox.layers[l].attention.dense.bias + + state_dict[f"blocks.{l}.ln2.w"] = neox.gpt_neox.layers[l].post_attention_layernorm.weight + state_dict[f"blocks.{l}.ln2.b"] = neox.gpt_neox.layers[l].post_attention_layernorm.bias + + state_dict[f"blocks.{l}.mlp.W_in"] = neox.gpt_neox.layers[l].mlp.dense_h_to_4h.weight.T + state_dict[f"blocks.{l}.mlp.b_in"] = neox.gpt_neox.layers[l].mlp.dense_h_to_4h.bias + + state_dict[f"blocks.{l}.mlp.W_out"] = neox.gpt_neox.layers[l].mlp.dense_4h_to_h.weight.T + state_dict[f"blocks.{l}.mlp.b_out"] = neox.gpt_neox.layers[l].mlp.dense_4h_to_h.bias + state_dict["ln_final.w"] = neox.gpt_neox.final_layer_norm.weight + state_dict["ln_final.b"] = neox.gpt_neox.final_layer_norm.bias + + state_dict["unembed.W_U"] = neox.embed_out.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/opt.py b/transformer_lens/pretrained/weight_conversions/opt.py new file mode 100644 index 0000000..23efde0 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/opt.py @@ -0,0 +1,84 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_opt_weights(opt, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = opt.model.decoder.embed_tokens.weight + state_dict["pos_embed.W_pos"] = opt.model.decoder.embed_positions.weight[2:, :] + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = opt.model.decoder.layers[l].self_attn_layer_norm.weight + state_dict[f"blocks.{l}.ln1.b"] = opt.model.decoder.layers[l].self_attn_layer_norm.bias + + W_Q = 
opt.model.decoder.layers[l].self_attn.q_proj.weight + W_K = opt.model.decoder.layers[l].self_attn.k_proj.weight + W_V = opt.model.decoder.layers[l].self_attn.v_proj.weight + W_Q = einops.rearrange( + W_Q, + "(index d_head) d_model->index d_model d_head", + index=cfg.n_heads, + ) + W_K = einops.rearrange( + W_K, + "(index d_head) d_model->index d_model d_head", + index=cfg.n_heads, + ) + W_V = einops.rearrange( + W_V, + "(index d_head) d_model->index d_model d_head", + index=cfg.n_heads, + ) + + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.W_V"] = W_V + + q_bias = einops.rearrange( + opt.model.decoder.layers[l].self_attn.q_proj.bias, + "(head_index d_head)->head_index d_head", + head_index=cfg.n_heads, + d_head=cfg.d_head, + ) + k_bias = einops.rearrange( + opt.model.decoder.layers[l].self_attn.k_proj.bias, + "(head_index d_head)->head_index d_head", + head_index=cfg.n_heads, + d_head=cfg.d_head, + ) + v_bias = einops.rearrange( + opt.model.decoder.layers[l].self_attn.v_proj.bias, + "(head_index d_head)->head_index d_head", + head_index=cfg.n_heads, + d_head=cfg.d_head, + ) + + state_dict[f"blocks.{l}.attn.b_Q"] = q_bias + state_dict[f"blocks.{l}.attn.b_K"] = k_bias + state_dict[f"blocks.{l}.attn.b_V"] = v_bias + + W_O = opt.model.decoder.layers[l].self_attn.out_proj.weight + W_O = einops.rearrange( + W_O, + "d_model (index d_head)->index d_head d_model", + index=cfg.n_heads, + ) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = opt.model.decoder.layers[l].self_attn.out_proj.bias + + state_dict[f"blocks.{l}.ln2.w"] = opt.model.decoder.layers[l].final_layer_norm.weight + state_dict[f"blocks.{l}.ln2.b"] = opt.model.decoder.layers[l].final_layer_norm.bias + + state_dict[f"blocks.{l}.mlp.W_in"] = opt.model.decoder.layers[l].fc1.weight.T + state_dict[f"blocks.{l}.mlp.W_out"] = opt.model.decoder.layers[l].fc2.weight.T + + state_dict[f"blocks.{l}.mlp.b_in"] = opt.model.decoder.layers[l].fc1.bias + state_dict[f"blocks.{l}.mlp.b_out"] = opt.model.decoder.layers[l].fc2.bias + state_dict["ln_final.w"] = opt.model.decoder.final_layer_norm.weight + state_dict["ln_final.b"] = opt.model.decoder.final_layer_norm.bias + state_dict["unembed.W_U"] = opt.lm_head.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/phi.py b/transformer_lens/pretrained/weight_conversions/phi.py new file mode 100644 index 0000000..5baabe1 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/phi.py @@ -0,0 +1,64 @@ +import einops + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_phi_weights(phi, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = phi.model.embed_tokens.weight + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = phi.model.layers[l].input_layernorm.weight + state_dict[f"blocks.{l}.ln1.b"] = phi.model.layers[l].input_layernorm.bias + + W_Q = phi.model.layers[l].self_attn.q_proj.weight + W_K = phi.model.layers[l].self_attn.k_proj.weight + W_V = phi.model.layers[l].self_attn.v_proj.weight + W_Q = einops.rearrange( + W_Q, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads + ) + W_K = einops.rearrange( + W_K, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads + ) + W_V = einops.rearrange( + W_V, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads + ) + 
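[Editorial aside, not part of the patch] A standalone shape check may help when reading the einops pattern that recurs throughout these converters; the dimensions below are arbitrary illustrative values, not any model's real configuration:

import einops
import torch

n_heads, d_head, d_model = 4, 8, 16
# Fused projection as stored in the Huggingface checkpoint
# (nn.Linear convention: out_features first), shape (n_heads*d_head, d_model).
W = torch.randn(n_heads * d_head, d_model)
# TransformerLens layout: one (d_model, d_head) matrix per head.
W_split = einops.rearrange(
    W, "(n_head d_head) d_model -> n_head d_model d_head", n_head=n_heads
)
assert W_split.shape == (n_heads, d_model, d_head)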
state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.W_V"] = W_V + + b_Q = phi.model.layers[l].self_attn.q_proj.bias + b_K = phi.model.layers[l].self_attn.k_proj.bias + b_V = phi.model.layers[l].self_attn.v_proj.bias + b_Q = einops.rearrange(b_Q, "(n_head d_head) -> n_head d_head", n_head=cfg.n_heads) + b_K = einops.rearrange(b_K, "(n_head d_head) -> n_head d_head", n_head=cfg.n_heads) + b_V = einops.rearrange(b_V, "(n_head d_head) -> n_head d_head", n_head=cfg.n_heads) + state_dict[f"blocks.{l}.attn.b_Q"] = b_Q + state_dict[f"blocks.{l}.attn.b_K"] = b_K + state_dict[f"blocks.{l}.attn.b_V"] = b_V + + W_O = phi.model.layers[l].self_attn.dense.weight + W_O = einops.rearrange( + W_O, "d_model (n_head d_head) -> n_head d_head d_model", n_head=cfg.n_heads + ) + + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = phi.model.layers[l].self_attn.dense.bias + + # Layer Norm 1 and 2 are tied. + state_dict[f"blocks.{l}.ln2.w"] = state_dict[f"blocks.{l}.ln1.w"] + state_dict[f"blocks.{l}.ln2.b"] = state_dict[f"blocks.{l}.ln1.b"] + + state_dict[f"blocks.{l}.mlp.W_in"] = phi.model.layers[l].mlp.fc1.weight.T + state_dict[f"blocks.{l}.mlp.b_in"] = phi.model.layers[l].mlp.fc1.bias + state_dict[f"blocks.{l}.mlp.W_out"] = phi.model.layers[l].mlp.fc2.weight.T + state_dict[f"blocks.{l}.mlp.b_out"] = phi.model.layers[l].mlp.fc2.bias + + state_dict["ln_final.w"] = phi.model.final_layernorm.weight + state_dict["ln_final.b"] = phi.model.final_layernorm.bias + + state_dict["unembed.W_U"] = phi.lm_head.weight.T + state_dict["unembed.b_U"] = phi.lm_head.bias + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/phi3.py b/transformer_lens/pretrained/weight_conversions/phi3.py new file mode 100644 index 0000000..b15ffe7 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/phi3.py @@ -0,0 +1,56 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_phi3_weights(phi, cfg: HookedTransformerConfig): + state_dict = {} + + state_dict["embed.W_E"] = phi.model.embed_tokens.weight + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = phi.model.layers[l].input_layernorm.weight + state_dict[f"blocks.{l}.ln1.b"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + + W = phi.model.layers[l].self_attn.qkv_proj.weight + W_Q, W_K, W_V = torch.tensor_split(W, 3, dim=0) + W_Q = einops.rearrange( + W_Q, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads + ) + W_K = einops.rearrange( + W_K, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads + ) + W_V = einops.rearrange( + W_V, "(n_head d_head) d_model -> n_head d_model d_head", n_head=cfg.n_heads + ) + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.b_Q"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.b_K"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + state_dict[f"blocks.{l}.attn.W_V"] = W_V + state_dict[f"blocks.{l}.attn.b_V"] = torch.zeros(cfg.n_heads, cfg.d_head, dtype=cfg.dtype) + + W_O = phi.model.layers[l].self_attn.o_proj.weight + W_O = einops.rearrange( + W_O, "d_model (n_head d_head) -> n_head d_head d_model", n_head=cfg.n_heads + ) + + state_dict[f"blocks.{l}.attn.W_O"] = W_O + state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + state_dict[f"blocks.{l}.ln2.w"] = 
phi.model.layers[l].post_attention_layernorm.weight + state_dict[f"blocks.{l}.ln2.b"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + + W = phi.model.layers[l].mlp.gate_up_proj.weight.T + W_gate, W_in = torch.tensor_split(W, 2, dim=1) + state_dict[f"blocks.{l}.mlp.W_in"] = W_in + state_dict[f"blocks.{l}.mlp.W_gate"] = W_gate + state_dict[f"blocks.{l}.mlp.W_out"] = phi.model.layers[l].mlp.down_proj.weight.T + + state_dict["ln_final.w"] = phi.model.norm.weight + + state_dict["unembed.W_U"] = phi.lm_head.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/qwen.py b/transformer_lens/pretrained/weight_conversions/qwen.py new file mode 100644 index 0000000..f6b0b83 --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/qwen.py @@ -0,0 +1,65 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_qwen_weights(qwen, cfg: HookedTransformerConfig): + state_dict = {} + model = qwen.transformer + state_dict["embed.W_E"] = model.wte.weight + + assert cfg.d_mlp is not None # keep mypy happy + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = model.h[l].ln_1.weight + + W_Q, W_K, W_V = model.h[l].attn.c_attn.weight.split(split_size=cfg.d_model, dim=0) + W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) + W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_heads) + W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn.W_K"] = W_K + state_dict[f"blocks.{l}.attn.W_V"] = W_V + + b_Q, b_K, b_V = model.h[l].attn.c_attn.bias.split(split_size=cfg.d_model, dim=0) + b_Q = einops.rearrange( + b_Q, + "(n_head d_head) -> n_head d_head", + n_head=cfg.n_heads, + ) + b_K = einops.rearrange( + b_K, + "(n_head d_head) -> n_head d_head", + n_head=cfg.n_heads, + ) + b_V = einops.rearrange( + b_V, + "(n_head d_head) -> n_head d_head", + n_head=cfg.n_heads, + ) + state_dict[f"blocks.{l}.attn.b_Q"] = b_Q + state_dict[f"blocks.{l}.attn.b_K"] = b_K + state_dict[f"blocks.{l}.attn.b_V"] = b_V + + W_O = model.h[l].attn.c_proj.weight + W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + + state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + state_dict[f"blocks.{l}.ln2.w"] = model.h[l].ln_2.weight + + state_dict[f"blocks.{l}.mlp.W_in"] = model.h[l].mlp.w1.weight.T + state_dict[f"blocks.{l}.mlp.W_gate"] = model.h[l].mlp.w2.weight.T + state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype) + + state_dict[f"blocks.{l}.mlp.W_out"] = model.h[l].mlp.c_proj.weight.T + state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + state_dict["ln_final.w"] = model.ln_f.weight + + state_dict["unembed.W_U"] = qwen.lm_head.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/qwen2.py b/transformer_lens/pretrained/weight_conversions/qwen2.py new file mode 100644 index 0000000..ab81ada --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/qwen2.py @@ -0,0 +1,76 @@ +import einops +import torch + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_qwen2_weights(qwen, cfg: HookedTransformerConfig): + # Note that this method is also applied for Qwen1.5 models, since 
they + # have architecture type Qwen2ForCausalLM. + + state_dict = {} + + state_dict["embed.W_E"] = qwen.model.embed_tokens.weight + + assert cfg.d_mlp is not None # keep mypy happy + + for l in range(cfg.n_layers): + state_dict[f"blocks.{l}.ln1.w"] = qwen.model.layers[l].input_layernorm.weight + + W_Q = qwen.model.layers[l].self_attn.q_proj.weight + W_K = qwen.model.layers[l].self_attn.k_proj.weight + W_V = qwen.model.layers[l].self_attn.v_proj.weight + W_Q = einops.rearrange(W_Q, "(n h) m->n m h", n=cfg.n_heads) + W_K = einops.rearrange(W_K, "(n h) m->n m h", n=cfg.n_key_value_heads) + W_V = einops.rearrange(W_V, "(n h) m->n m h", n=cfg.n_key_value_heads) + + state_dict[f"blocks.{l}.attn.W_Q"] = W_Q + state_dict[f"blocks.{l}.attn._W_K"] = W_K + state_dict[f"blocks.{l}.attn._W_V"] = W_V + + b_Q = qwen.model.layers[l].self_attn.q_proj.bias + b_Q = einops.rearrange( + b_Q, + "(n_head d_head) -> n_head d_head", + n_head=cfg.n_heads, + ) + + b_K = qwen.model.layers[l].self_attn.k_proj.bias + b_K = einops.rearrange( + b_K, + "(n_head d_head) -> n_head d_head", + n_head=cfg.n_key_value_heads, + ) + + b_V = qwen.model.layers[l].self_attn.v_proj.bias + b_V = einops.rearrange( + b_V, + "(n_head d_head) -> n_head d_head", + n_head=cfg.n_key_value_heads, + ) + + state_dict[f"blocks.{l}.attn.b_Q"] = b_Q + state_dict[f"blocks.{l}.attn._b_K"] = b_K + state_dict[f"blocks.{l}.attn._b_V"] = b_V + + W_O = qwen.model.layers[l].self_attn.o_proj.weight + W_O = einops.rearrange(W_O, "m (n h)->n h m", n=cfg.n_heads) + state_dict[f"blocks.{l}.attn.W_O"] = W_O + + state_dict[f"blocks.{l}.attn.b_O"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + state_dict[f"blocks.{l}.ln2.w"] = qwen.model.layers[l].post_attention_layernorm.weight + + state_dict[f"blocks.{l}.mlp.W_in"] = qwen.model.layers[l].mlp.up_proj.weight.T + state_dict[f"blocks.{l}.mlp.W_gate"] = qwen.model.layers[l].mlp.gate_proj.weight.T + state_dict[f"blocks.{l}.mlp.b_in"] = torch.zeros(cfg.d_mlp, dtype=cfg.dtype) + + state_dict[f"blocks.{l}.mlp.W_out"] = qwen.model.layers[l].mlp.down_proj.weight.T + state_dict[f"blocks.{l}.mlp.b_out"] = torch.zeros(cfg.d_model, dtype=cfg.dtype) + + state_dict["ln_final.w"] = qwen.model.norm.weight + + state_dict["unembed.W_U"] = qwen.lm_head.weight.T + state_dict["unembed.b_U"] = torch.zeros(cfg.d_vocab, dtype=cfg.dtype) + + return state_dict diff --git a/transformer_lens/pretrained/weight_conversions/t5.py b/transformer_lens/pretrained/weight_conversions/t5.py new file mode 100644 index 0000000..365054a --- /dev/null +++ b/transformer_lens/pretrained/weight_conversions/t5.py @@ -0,0 +1,101 @@ +import einops + +from transformer_lens.HookedTransformerConfig import HookedTransformerConfig + + +def convert_t5_weights(t5, cfg: HookedTransformerConfig): + state_dict = { + "embed.W_E": t5.encoder.embed_tokens.weight, + "unembed.W_U": t5.encoder.embed_tokens.weight.T, + "encoder.0.attn.rel_pos_bias.weight": t5.encoder.block[0] + .layer[0] + .SelfAttention.relative_attention_bias.weight, + } + + for l in range(cfg.n_layers): + block = t5.encoder.block[l] + state_dict[f"encoder.{l}.attn.W_Q"] = einops.rearrange( + block.layer[0].SelfAttention.q.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + state_dict[f"encoder.{l}.attn.W_K"] = einops.rearrange( + block.layer[0].SelfAttention.k.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + + state_dict[f"encoder.{l}.attn.W_V"] = einops.rearrange( + block.layer[0].SelfAttention.v.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + + state_dict[f"encoder.{l}.attn.W_O"] = einops.rearrange( + 
block.layer[0].SelfAttention.o.weight, + "m (i h) -> i h m", + i=cfg.n_heads, + ) + state_dict[f"encoder.{l}.ln1.w"] = block.layer[0].layer_norm.weight + + # fixme DenseReluDense may be T5DenseGatedActDense instead + state_dict[f"encoder.{l}.mlp.W_in"] = einops.rearrange( + block.layer[1].DenseReluDense.wi.weight, "mlp model -> model mlp" + ) + + state_dict[f"encoder.{l}.mlp.W_out"] = einops.rearrange( + block.layer[1].DenseReluDense.wo.weight, "model mlp -> mlp model" + ) + state_dict[f"encoder.{l}.ln2.w"] = block.layer[1].layer_norm.weight + + state_dict["encoder_final_ln.w"] = t5.encoder.final_layer_norm.weight + + state_dict["decoder.0.attn.rel_pos_bias.weight"] = ( + t5.decoder.block[0].layer[0].SelfAttention.relative_attention_bias.weight + ) + + for l in range(cfg.n_layers): + block = t5.decoder.block[l] + state_dict[f"decoder.{l}.attn.W_Q"] = einops.rearrange( + block.layer[0].SelfAttention.q.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + + state_dict[f"decoder.{l}.attn.W_K"] = einops.rearrange( + block.layer[0].SelfAttention.k.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + state_dict[f"decoder.{l}.attn.W_V"] = einops.rearrange( + block.layer[0].SelfAttention.v.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + + state_dict[f"decoder.{l}.attn.W_O"] = einops.rearrange( + block.layer[0].SelfAttention.o.weight, + "m (i h) -> i h m", + i=cfg.n_heads, + ) + + state_dict[f"decoder.{l}.ln1.w"] = block.layer[0].layer_norm.weight + + state_dict[f"decoder.{l}.cross_attn.W_Q"] = einops.rearrange( + block.layer[1].EncDecAttention.q.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + + state_dict[f"decoder.{l}.cross_attn.W_K"] = einops.rearrange( + block.layer[1].EncDecAttention.k.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + + state_dict[f"decoder.{l}.cross_attn.W_V"] = einops.rearrange( + block.layer[1].EncDecAttention.v.weight, "(i h) m -> i m h", i=cfg.n_heads + ) + state_dict[f"decoder.{l}.cross_attn.W_O"] = einops.rearrange( + block.layer[1].EncDecAttention.o.weight, + "m (i h) -> i h m", + i=cfg.n_heads, + ) + state_dict[f"decoder.{l}.ln2.w"] = block.layer[1].layer_norm.weight + + # fixme DenseReluDense may be T5DenseGatedActDense instead + state_dict[f"decoder.{l}.mlp.W_in"] = einops.rearrange( + block.layer[2].DenseReluDense.wi.weight, "mlp model -> model mlp" + ) + state_dict[f"decoder.{l}.mlp.W_out"] = einops.rearrange( + block.layer[2].DenseReluDense.wo.weight, "model mlp -> mlp model" + ) + state_dict[f"decoder.{l}.ln3.w"] = block.layer[2].layer_norm.weight + + state_dict["decoder_final_ln.w"] = t5.decoder.final_layer_norm.weight + + return state_dict diff --git a/transformer_lens/utilities/activation_functions.py b/transformer_lens/utilities/activation_functions.py new file mode 100644 index 0000000..6cc7013 --- /dev/null +++ b/transformer_lens/utilities/activation_functions.py @@ -0,0 +1,26 @@ +"""Activation Functions. + +Utilities for interacting with all supported activation functions. +""" +from typing import Callable, Dict + +import torch +import torch.nn.functional as F + +from transformer_lens.utils import gelu_fast, gelu_new, solu + +# Convenient type for the format of each activation function +ActivationFunction = Callable[..., torch.Tensor] + +# All currently supported activation functions. To add a new function, simply +# put the name of the function as the key, and the value as the actual callable. 
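[Editorial aside, not part of the patch] Before the registry itself, which follows immediately below, a hedged sketch of how a component might resolve an activation by name; the helper is illustrative only and is not the library's actual factory code:

import torch

def resolve_activation(name: str) -> ActivationFunction:
    """Look up an activation in the SUPPORTED_ACTIVATIONS registry defined below."""
    if name not in SUPPORTED_ACTIVATIONS:
        raise ValueError(f"Unsupported activation function: {name!r}")
    return SUPPORTED_ACTIVATIONS[name]

act_fn = resolve_activation("gelu_new")
out = act_fn(torch.randn(2, 3))  # element-wise, so the output shape matches the input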
+SUPPORTED_ACTIVATIONS: Dict[str, ActivationFunction] = { + "solu": solu, + "solu_ln": solu, + "gelu_new": gelu_new, + "gelu_fast": gelu_fast, + "silu": F.silu, + "relu": F.relu, + "gelu": F.gelu, + "gelu_pytorch_tanh": lambda tensor: F.gelu(tensor, approximate="tanh"), +} diff --git a/transformer_lens/utilities/addmm.py b/transformer_lens/utilities/addmm.py new file mode 100644 index 0000000..6f86b55 --- /dev/null +++ b/transformer_lens/utilities/addmm.py @@ -0,0 +1,35 @@ +"""Addmm + +Implementations of Addmm functions matching Huggingface implementations. +""" +import torch +from jaxtyping import Float + + +def vanilla_addmm( + input: Float[torch.Tensor, "... #o"], # Must be broadcastable to "m o" + mat1: Float[torch.Tensor, "m n"], + mat2: Float[torch.Tensor, "n o"], +) -> Float[torch.Tensor, "m o"]: + """Typechecked version of torch.addmm. + + Note that both mat1 and mat2 *must* be 2d matrices. + """ + return torch.addmm(input, mat1, mat2) + + +def batch_addmm( + bias: Float[torch.Tensor, "... #d_out"], # Must be broadcastable to "... d_out" + weight: Float[torch.Tensor, "d_in d_out"], + x: Float[torch.Tensor, "... d_in"], +) -> Float[torch.Tensor, "... d_out"]: + """Fused add-multiply with support for batch dimensions. + + Must match the Huggingface Conv1D implementation exactly. + https://github.com/huggingface/transformers/blob/9ba9369a2557e53a01378199a9839ec6e82d8bc7/src/transformers/pytorch_utils.py#L102-L106 + """ + n_output_features = weight.shape[-1] + size_out = x.size()[:-1] + (n_output_features,) + x = vanilla_addmm(bias, x.view(-1, x.size(-1)), weight) + x = x.view(size_out) + return x diff --git a/transformer_lens/utilities/attention.py b/transformer_lens/utilities/attention.py new file mode 100644 index 0000000..dc38bde --- /dev/null +++ b/transformer_lens/utilities/attention.py @@ -0,0 +1,38 @@ +"""Attention. + +Utilities for attention components. +""" +import einops +import torch +import torch.nn.functional as F +from jaxtyping import Float + + +def simple_attn_linear( + input: Float[torch.Tensor, "batch pos d_model"], + w: Float[torch.Tensor, "head_index d_model d_head"], + b: Float[torch.Tensor, "head_index d_head"], +) -> Float[torch.Tensor, "batch pos head_index d_head"]: + """Linear layer for attention calculation.""" + w = einops.rearrange(w, "head_index d_model d_head -> (head_index d_head) d_model") + b_ = einops.rearrange(b, "head_index d_head -> (head_index d_head)") + return F.linear(input, w, b_).reshape(input.shape[0], input.shape[1], b.shape[0], b.shape[1]) + + +def complex_attn_linear( + input: Float[torch.Tensor, "batch pos head_index d_model"], + w: Float[torch.Tensor, "head_index d_model d_head"], + b: Float[torch.Tensor, "head_index d_head"], +) -> Float[torch.Tensor, "batch pos head_index d_head"]: + """Linear layer for attention calculation. + + This is almost the same as simple_attn_linear, but the input tensor has an extra head_index dimension, used when calculating the input of each attention head separately. + """ + return ( + einops.einsum( + input, + w, + "batch pos head_index d_model, head_index d_model d_head -> batch pos head_index d_head", + ) + + b + ) diff --git a/transformer_lens/utilities/devices.py b/transformer_lens/utilities/devices.py index c8e5b78..f7de5d3 100644 --- a/transformer_lens/utilities/devices.py +++ b/transformer_lens/utilities/devices.py @@ -3,6 +3,7 @@ Utilities to get the correct device, and assist in distributing model layers across multiple devices. 
""" + from __future__ import annotations from typing import Optional, Union @@ -45,7 +46,11 @@ def get_device_for_block_index( def move_to_and_update_config( - model: Union["transformer_lens.HookedTransformer", "transformer_lens.HookedEncoder"], + model: Union[ + "transformer_lens.HookedTransformer", + "transformer_lens.HookedEncoder", + "transformer_lens.HookedEncoderDecoder", + ], device_or_dtype: Union[torch.device, str, torch.dtype], print_details=True, ): diff --git a/transformer_lens/utils.py b/transformer_lens/utils.py index 7071f10..7e5828e 100644 --- a/transformer_lens/utils.py +++ b/transformer_lens/utils.py @@ -115,6 +115,7 @@ def to_numpy(tensor): def lm_cross_entropy_loss( logits: Float[torch.Tensor, "batch pos d_vocab"], tokens: Int[torch.Tensor, "batch pos"], + attention_mask: Optional[Int[torch.Tensor, "batch pos"]] = None, per_token: bool = False, ) -> Union[Float[torch.Tensor, ""], Float[torch.Tensor, "batch pos"]]: """Cross entropy loss for the language model, gives the loss for predicting the NEXT token. @@ -122,6 +123,8 @@ def lm_cross_entropy_loss( Args: logits (torch.Tensor): Logits. Shape [batch, pos, d_vocab] tokens (torch.Tensor[int64]): Input tokens. Shape [batch, pos] + attention_mask (torch.Tensor[int64], optional): Attention mask. Shape [batch, pos]. Used to + mask out padding tokens. Defaults to None. per_token (bool, optional): Whether to return the log probs predicted for the correct token, or the loss (ie mean of the predicted log probs). Note that the returned array has shape [batch, seq-1] as we cannot predict the first token (alternately, we ignore the final logit). Defaults to False. """ log_probs = F.log_softmax(logits, dim=-1) @@ -129,10 +132,20 @@ def lm_cross_entropy_loss( # Offsets needed because we're predicting the NEXT token (this means the final logit is meaningless) # None and [..., 0] needed because the tensor used in gather must have the same rank. predicted_log_probs = log_probs[..., :-1, :].gather(dim=-1, index=tokens[..., 1:, None])[..., 0] + + if attention_mask is not None: + # Ignore token positions which are masked out or where the next token is masked out + # (generally padding tokens) + next_token_mask = torch.logical_and(attention_mask[:, :-1], attention_mask[:, 1:]) + predicted_log_probs *= next_token_mask + n_tokens = next_token_mask.sum().item() + else: + n_tokens = predicted_log_probs.numel() + if per_token: return -predicted_log_probs else: - return -predicted_log_probs.mean() + return -predicted_log_probs.sum() / n_tokens def lm_accuracy( @@ -179,6 +192,18 @@ def solu(input: Float[torch.Tensor, "batch pos d_mlp"]) -> Float[torch.Tensor, " return input * F.softmax(input, dim=-1) +ACTIVATION_FN_DICT = { + "solu": solu, + "solu_ln": solu, + "gelu_new": gelu_new, + "gelu_fast": gelu_fast, + "silu": F.silu, + "relu": F.relu, + "gelu": F.gelu, + "gelu_pytorch_tanh": lambda tensor: F.gelu(tensor, approximate="tanh"), +} + + def calc_fan_in_and_fan_out(tensor): """ Calculate the fan in and fan out of a tensor. We define it ourselves because Torch uses a