Commit

fix
Signed-off-by: shunxing12345 <[email protected]>
shunxing12345 committed Dec 30, 2024
1 parent cc3293d commit fa79011
Showing 2 changed files with 0 additions and 10 deletions.
9 changes: 0 additions & 9 deletions src/transformers/__init__.py
@@ -1033,7 +1033,6 @@
     _import_structure["models.gpt_sw3"].append("GPTSw3Tokenizer")
     _import_structure["models.layoutxlm"].append("LayoutXLMTokenizer")
     _import_structure["models.llama"].append("LlamaTokenizer")
-    _import_structure["models.telechat2"].append("TeleChat2Tokenizer")
     _import_structure["models.m2m_100"].append("M2M100Tokenizer")
     _import_structure["models.marian"].append("MarianTokenizer")
     _import_structure["models.mbart"].append("MBartTokenizer")
@@ -2648,9 +2647,6 @@
     _import_structure["models.telechat2"].extend(
         [
             "TeleChat2ForCausalLM",
-            "TeleChat2ForQuestionAnswering",
-            "TeleChat2ForSequenceClassification",
-            "TeleChat2ForTokenClassification",
             "TeleChat2Model",
             "TeleChat2PreTrainedModel",
         ]
@@ -4860,7 +4856,6 @@
     )
     _import_structure["models.gptj"].extend(["FlaxGPTJForCausalLM", "FlaxGPTJModel", "FlaxGPTJPreTrainedModel"])
     _import_structure["models.llama"].extend(["FlaxLlamaForCausalLM", "FlaxLlamaModel", "FlaxLlamaPreTrainedModel"])
-    _import_structure["models.telechat2"].extend(["FlaxTeleChat2ForCausalLM", "FlaxTeleChat2Model", "FlaxTeleChat2PreTrainedModel"])
     _import_structure["models.gemma"].extend(["FlaxGemmaForCausalLM", "FlaxGemmaModel", "FlaxGemmaPreTrainedModel"])
     _import_structure["models.longt5"].extend(
         [
@@ -6065,7 +6060,6 @@
         from .models.gpt_sw3 import GPTSw3Tokenizer
         from .models.layoutxlm import LayoutXLMTokenizer
         from .models.llama import LlamaTokenizer
-        from .models.telechat2 import TeleChat2Tokenizer
         from .models.m2m_100 import M2M100Tokenizer
         from .models.marian import MarianTokenizer
         from .models.mbart import MBartTokenizer
@@ -7419,9 +7413,6 @@
         )
         from .models.telechat2 import (
             TeleChat2ForCausalLM,
-            TeleChat2ForQuestionAnswering,
-            TeleChat2ForSequenceClassification,
-            TeleChat2ForTokenClassification,
             TeleChat2Model,
             TeleChat2PreTrainedModel,
         )
1 change: 0 additions & 1 deletion tests/models/telechat2/test_modeling_telechat2.py
@@ -296,7 +296,6 @@ def prepare_config_and_inputs_for_common(self):


 @require_torch
-# Copied from tests.models.mistral.test_modeling_mistral.MistralModelTest with Mistral->TeleChat2
 class TeleChat2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
