From 469c061ae03bb6e1779331d11b4d066cb170ce33 Mon Sep 17 00:00:00 2001
From: Frankstein <20307140057@fudan.edu.cn>
Date: Mon, 22 Jul 2024 17:19:55 +0800
Subject: [PATCH] fix: typo

---
 src/lm_saes/activation/activation_source.py        | 2 +-
 src/lm_saes/analysis/sample_feature_activations.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/lm_saes/activation/activation_source.py b/src/lm_saes/activation/activation_source.py
index 87e12d7..ad8d2a4 100644
--- a/src/lm_saes/activation/activation_source.py
+++ b/src/lm_saes/activation/activation_source.py
@@ -11,7 +11,7 @@ from lm_saes.activation.token_source import TokenSource
 from lm_saes.activation.activation_dataset import load_activation_chunk, list_activation_chunks
 from lm_saes.config import ActivationStoreConfig
-
+import torch.distributed as dist
 
 class ActivationSource(ABC):
     def next(self) -> Dict[str, torch.Tensor] | None:
         """
diff --git a/src/lm_saes/analysis/sample_feature_activations.py b/src/lm_saes/analysis/sample_feature_activations.py
index 3f95ddc..d8c8a13 100644
--- a/src/lm_saes/analysis/sample_feature_activations.py
+++ b/src/lm_saes/analysis/sample_feature_activations.py
@@ -26,7 +26,7 @@ def sample_feature_activations(
     sae_chunk_id: int = 0,
     n_sae_chunks: int = 1,  # By default, we do not chunk the SAE. When the model & SAE is large, we can chunk the SAE to save memory.
 ):
-    if cfg.use_ddp:
+    if sae.cfg.ddp_size > 1:
         raise ValueError("Sampling feature activations does not support DDP yet")
     assert cfg.sae.d_sae is not None  # Make mypy happy
 