From fb134013369f925b10b4d1c003e4d168bd37f411 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?= Date: Fri, 26 Nov 2021 08:38:46 +0000 Subject: [PATCH 01/18] Add parameter merging to utils --- annif/backend/fasttext.py | 5 ++--- annif/backend/stwfsa.py | 10 +++------- annif/util.py | 8 ++++++++ tests/test_util.py | 23 +++++++++++++++++++++++ 4 files changed, 36 insertions(+), 10 deletions(-) diff --git a/annif/backend/fasttext.py b/annif/backend/fasttext.py index df1205c6c..b39e59599 100644 --- a/annif/backend/fasttext.py +++ b/annif/backend/fasttext.py @@ -5,6 +5,7 @@ import annif.util from annif.suggestion import SubjectSuggestion, ListSuggestionResult from annif.exception import NotInitializedException, NotSupportedException +from annif.util import apply_param_parse_config import fasttext from . import backend from . import mixins @@ -111,9 +112,7 @@ def _create_model(self, params, jobs): self.info('creating fastText model') trainpath = os.path.join(self.datadir, self.TRAIN_FILE) modelpath = os.path.join(self.datadir, self.MODEL_FILE) - params = {param: self.FASTTEXT_PARAMS[param](val) - for param, val in params.items() - if param in self.FASTTEXT_PARAMS} + params = apply_param_parse_config(self.FASTTEXT_PARAMS, params) if jobs != 0: # jobs set by user to non-default value params['thread'] = jobs self.debug('Model parameters: {}'.format(params)) diff --git a/annif/backend/stwfsa.py b/annif/backend/stwfsa.py index badf344b7..80d27eda0 100644 --- a/annif/backend/stwfsa.py +++ b/annif/backend/stwfsa.py @@ -3,7 +3,7 @@ from annif.exception import NotInitializedException, NotSupportedException from annif.suggestion import ListSuggestionResult, SubjectSuggestion from . import backend -from annif.util import atomic_save, boolean +from annif.util import atomic_save, boolean, apply_param_parse_config _KEY_CONCEPT_TYPE_URI = 'concept_type_uri' @@ -92,12 +92,8 @@ def _load_data(self, corpus): def _train(self, corpus, params, jobs=0): X, y = self._load_data(corpus) - new_params = { - key: self.STWFSA_PARAMETERS[key](val) - for key, val - in params.items() - if key in self.STWFSA_PARAMETERS - } + new_params = apply_param_parse_config( + self.STWFSA_PARAMETERS, params) p = StwfsapyPredictor( graph=self.project.vocab.as_graph(), langs=frozenset([params['language']]), diff --git a/annif/util.py b/annif/util.py index c05fc3892..11c1540c0 100644 --- a/annif/util.py +++ b/annif/util.py @@ -84,6 +84,14 @@ def parse_args(param_string): return posargs, kwargs +def apply_param_parse_config(configs, params): + """Applies a parsing configuration to a parameter dict.""" + return { + param: configs[param](val) + for param, val in params.items() + if param in configs and val is not None} + + def boolean(val): """Convert the given value to a boolean True/False value, if it isn't already. 
True values are '1', 'yes', 'true', and 'on' (case insensitive), everything diff --git a/tests/test_util.py b/tests/test_util.py index 0c1b2fa05..be0eab475 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -1,6 +1,7 @@ """Unit tests for Annif utility functions""" import annif.util +from unittest.mock import MagicMock def test_boolean(): @@ -33,3 +34,25 @@ def test_metric_code(): for input, output in zip(inputs, outputs): assert annif.util.metric_code(input) == output + + +def test_apply_parse_param_config(): + fun0 = MagicMock() + fun0.return_value = 23 + fun1 = MagicMock() + fun1.return_value = 'ret' + configs = { + 'a': fun0, + 'c': fun1 + } + params = { + 'a': 0, + 'b': 23, + 'c': None + } + ret = annif.util.apply_param_parse_config(configs, params) + assert ret == { + 'a': 23 + } + fun0.assert_called_once_with(0) + fun1.assert_not_called() From e249715dedbd724e08ee2f72be55b591a71bf6ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?= Date: Fri, 3 Dec 2021 11:24:03 +0000 Subject: [PATCH 02/18] Allow atomic save to handle directories. --- annif/backend/omikuji.py | 8 +++---- annif/util.py | 27 +++++++++++++++------- tests/test_util.py | 48 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 12 deletions(-) diff --git a/annif/backend/omikuji.py b/annif/backend/omikuji.py index 15922dcc9..3ff021f75 100644 --- a/annif/backend/omikuji.py +++ b/annif/backend/omikuji.py @@ -2,7 +2,6 @@ import omikuji import os.path -import shutil import annif.util from annif.suggestion import SubjectSuggestion, ListSuggestionResult from annif.exception import NotInitializedException, NotSupportedException, \ @@ -95,9 +94,10 @@ def _create_model(self, params, jobs): self._model = omikuji.Model.train_on_data( train_path, hyper_param, jobs or None) - if os.path.exists(model_path): - shutil.rmtree(model_path) - self._model.save(os.path.join(self.datadir, self.MODEL_FILE)) + annif.util.atomic_save( + self._model, + model_path, + None) def _train(self, corpus, params, jobs=0): if corpus != 'cached': diff --git a/annif/util.py b/annif/util.py index 11c1540c0..b7f471147 100644 --- a/annif/util.py +++ b/annif/util.py @@ -3,6 +3,7 @@ import glob import os import os.path +from shutil import rmtree import tempfile import numpy as np from annif import logger @@ -13,21 +14,31 @@ def atomic_save(obj, dirname, filename, method=None): """Save the given object (which must have a .save() method, unless the method parameter is given) into the given directory with the given filename, using a temporary file and renaming the temporary file to the - final name.""" - - prefix, suffix = os.path.splitext(filename) - tempfd, tempfilename = tempfile.mkstemp( - prefix=prefix, suffix=suffix, dir=dirname) - os.close(tempfd) + final name. 
To save a directory explicitly set filename=None.""" + + if filename: + prefix, suffix = os.path.splitext(filename) + tempfd, tempfilename = tempfile.mkstemp( + prefix=prefix, suffix=suffix, dir=dirname) + os.close(tempfd) + target_pth = os.path.join(dirname, filename) + else: + tldir = os.path.dirname(dirname.rstrip('/')) + os.makedirs(dirname, exist_ok=tldir) + tempdir = tempfile.TemporaryDirectory(dir=tldir) + tempfilename = tempdir.name + target_pth = dirname logger.debug('saving %s to temporary file %s', str(obj)[:90], tempfilename) if method is not None: method(obj, tempfilename) else: obj.save(tempfilename) for fn in glob.glob(tempfilename + '*'): - newname = fn.replace(tempfilename, os.path.join(dirname, filename)) + newname = fn.replace(tempfilename, target_pth) logger.debug('renaming temporary file %s to %s', fn, newname) - os.rename(fn, newname) + if os.path.isdir(newname): + rmtree(newname) + os.replace(fn, newname) def cleanup_uri(uri): diff --git a/tests/test_util.py b/tests/test_util.py index be0eab475..45a316979 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -2,6 +2,7 @@ import annif.util from unittest.mock import MagicMock +import os.path as osp def test_boolean(): @@ -56,3 +57,50 @@ def test_apply_parse_param_config(): } fun0.assert_called_once_with(0) fun1.assert_not_called() + + +def _save(obj, pth): + with open(pth, 'w') as f: + print('test file content', file=f) + + +def test_atomic_save_method(tmpdir): + fname = 'tst_file_method.txt' + annif.util.atomic_save(None, tmpdir.strpath, fname, method=_save) + f_pth = tmpdir.join(fname) + assert f_pth.exists() + with f_pth.open() as f: + assert f.readlines() == ['test file content\n'] + + +def test_atomic_save(tmpdir): + fname = 'tst_file_obj.txt' + to_save = MagicMock() + to_save.save.side_effect = lambda pth: _save(None, pth) + annif.util.atomic_save(to_save, tmpdir.strpath, fname) + f_pth = tmpdir.join(fname) + assert f_pth.exists() + with f_pth.open() as f: + assert f.readlines() == ['test file content\n'] + to_save.save.assert_called_once() + call_args = to_save.save.calls[0].args + assert isinstance(call_args[0], MagicMock) + assert call_args[1] != f_pth.strpath + + +def test_atomic_save_folder(tmpdir): + folder_name = 'test_save' + fname_0 = 'tst_file_0' + fname_1 = 'tst_file_1' + + def save_folder(obj, pth): + _save(None, osp.join(pth, fname_0)) + _save(None, osp.join(pth, fname_1)) + folder_path = tmpdir.join(folder_name) + annif.util.atomic_save(None, folder_path.strpath, None, method=save_folder) + assert folder_path.exists() + for f_name in [fname_0, fname_1]: + f_pth = folder_path.join(f_name) + assert f_pth.exists() + with f_pth.open() as f: + assert f.readlines() == ['test file content\n'] From 5cc207b4a75ba17da8c4a5a2891a29d40836d9d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?= Date: Mon, 6 Dec 2021 09:21:48 +0000 Subject: [PATCH 03/18] Add XTransformer backend. --- annif/backend/__init__.py | 10 + annif/backend/xtransformer.py | 248 +++++++++++++++++++++++++ setup.py | 1 + tests/test_backend_xtransformer.py | 281 +++++++++++++++++++++++++++++ 4 files changed, 540 insertions(+) create mode 100644 annif/backend/xtransformer.py create mode 100644 tests/test_backend_xtransformer.py diff --git a/annif/backend/__init__.py b/annif/backend/__init__.py index 5ef28c9ac..56f6c05c8 100644 --- a/annif/backend/__init__.py +++ b/annif/backend/__init__.py @@ -67,6 +67,15 @@ def _tfidf(): return tfidf.TFIDFBackend +def _xtransformer(): + try: + from . 
import xtransformer + xtransformer.XTransformerBackend + except ImportError: + raise ValueError( + "XTransformer not available, not enabling XTransformer backend") + + def _yake(): try: from . import yake @@ -88,6 +97,7 @@ def _yake(): 'stwfsa': _stwfsa, 'svc': _svc, 'tfidf': _tfidf, + 'xtransformer': _xtransformer, 'yake': _yake } diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py new file mode 100644 index 000000000..0abc60c78 --- /dev/null +++ b/annif/backend/xtransformer.py @@ -0,0 +1,248 @@ +"""Annif backend using the transformer variant of pecos.""" + +from sys import stdout +import os.path as osp +import logging +import scipy.sparse as sp +import numpy as np + +from annif.exception import NotInitializedException, NotSupportedException +from annif.suggestion import ListSuggestionResult, SubjectSuggestion +from . import mixins +from . import backend +from annif.util import boolean, apply_param_parse_config, atomic_save + +from pecos.xmc.xtransformer.model import XTransformer +from pecos.xmc.xtransformer.module import MLProblemWithText +from pecos.utils.featurization.text.preprocess import Preprocessor +from pecos.xmc.xtransformer import matcher + + +class XTransformerBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): + """XTransformer based backend for Annif""" + name = 'xtransformer' + needs_subject_index = True + + _model = None + + train_X_file = 'xtransformer-train-X.npz' + train_y_file = 'xtransformer-train-y.npz' + train_txt_file = 'xtransformer-train-raw.txt' + model_folder = 'xtransformer-model' + + PARAM_CONFIG = { + 'min_df': int, + 'ngram': int, + 'fix_clustering': boolean, + 'nr_splits': int, + 'min_codes': int, + 'max_leaf_size': int, + 'imbalanced_ratio': float, + 'imbalanced_depth': int, + 'max_match_clusters': int, + 'do_fine_tune': boolean, + 'model_shortcut': str, + 'beam_size': int, + 'limit': int, + 'post_processor': str, + 'negative_sampling': str, + 'ensemble_method': str, + 'threshold': float, + 'loss_function': str, + 'truncate_length': int, + 'hidden_droput_prob': float, + 'batch_size': int, + 'gradient_accumulation_steps': int, + 'learning_rate': float, + 'weight_decay': float, + 'adam_epsilon': float, + 'num_train_epochs': int, + 'max_steps': int, + 'lr_schedule': str, + 'warmup_steps': int, + 'logging_steps': int, + 'save_steps': int, + 'max_active_matching_labels': int, + 'max_num_labels_in_gpu': int, + 'use_gpu': boolean, + 'bootstrap_model': str + } + + DEFAULT_PARAMETERS = { + 'min_df': 1, + 'ngram': 1, + 'fix_clustering': False, + 'nr_splits': 16, + 'min_codes': None, + 'max_leaf_size': 100, + 'imbalanced_ratio': 0.0, + 'imbalanced_depth': 100, + 'max_match_clusters': 32768, + 'do_fine_tune': True, + # 'model_shortcut': 'distilbert-base-multilingual-cased', + 'model_shortcut': 'bert-base-multilingual-uncased', + 'beam_size': 20, + 'limit': 100, + 'post_processor': 'sigmoid', + 'negative_sampling': 'tfn', + 'ensemble_method': 'transformer-only', + 'threshold': 0.1, + 'loss_function': 'squared-hinge', + 'truncate_length': 128, + 'hidden_droput_prob': 0.1, + 'batch_size': 32, + 'gradient_accumulation_steps': 1, + 'learning_rate': 1e-4, + 'weight_decay': 0.0, + 'adam_epsilon': 1e-8, + 'num_train_epochs': 1, + 'max_steps': 0, + 'lr_schedule': 'linear', + 'warmup_steps': 0, + 'logging_steps': 100, + 'save_steps': 1000, + 'max_active_matching_labels': None, + 'max_num_labels_in_gpu': 65536, + 'use_gpu': True, + 'bootstrap_model': 'linear' + } + + def _initialize_model(self): + if self._model is None: + path = 
osp.join(self.datadir, self.model_folder)
+            self.debug('loading model from {}'.format(path))
+            if osp.exists(path):
+                self._model = XTransformer.load(path)
+            else:
+                raise NotInitializedException(
+                    'model {} not found'.format(path),
+                    backend_id=self.backend_id)
+
+    def initialize(self, parallel=False):
+        self.initialize_vectorizer()
+        self._initialize_model()
+
+    def default_params(self):
+        params = backend.AnnifBackend.DEFAULT_PARAMETERS.copy()
+        params.update(self.DEFAULT_PARAMETERS)
+        return params
+
+    def _create_train_files(self, veccorpus, corpus):
+        self.info('creating train file')
+        Xs = []
+        ys = []
+        txt_pth = osp.join(self.datadir, self.train_txt_file)
+        with open(txt_pth, 'w', encoding='utf-8') as txt_file:
+            for doc, vector in zip(corpus.documents, veccorpus):
+                subject_ids = [
+                    self.project.subjects.by_uri(uri)
+                    for uri
+                    in doc.uris]
+                subject_ids = [s_id for s_id in subject_ids if s_id]
+                if not (subject_ids and doc.text):
+                    continue  # noqa
+                print(' '.join(doc.text.split()), file=txt_file)
+                Xs.append(
+                    sp.csr_matrix(vector, dtype=np.float32).sorted_indices())
+                ys.append(
+                    sp.csr_matrix((
+                        np.ones(len(subject_ids)),
+                        (
+                            np.zeros(len(subject_ids)),
+                            subject_ids)),
+                        shape=(1, len(self.project.subjects)),
+                        dtype=np.float32
+                    ).sorted_indices())
+        atomic_save(
+            sp.vstack(Xs, format='csr'),
+            self.datadir,
+            self.train_X_file,
+            method=lambda mtrx, target: sp.save_npz(
+                target,
+                mtrx,
+                compressed=True))
+        atomic_save(
+            sp.vstack(ys, format='csr'),
+            self.datadir,
+            self.train_y_file,
+            method=lambda mtrx, target: sp.save_npz(
+                target,
+                mtrx,
+                compressed=True))
+
+    def _create_model(self, params, jobs):
+        train_txts = Preprocessor.load_data_from_file(
+            osp.join(self.datadir, self.train_txt_file),
+            label_text_path=None,
+            text_pos=0)['corpus']
+        train_X = sp.load_npz(osp.join(self.datadir, self.train_X_file))
+        train_y = sp.load_npz(osp.join(self.datadir, self.train_y_file))
+        model_path = osp.join(self.datadir, self.model_folder)
+        new_params = apply_param_parse_config(
+            self.PARAM_CONFIG,
+            self.DEFAULT_PARAMETERS)
+        new_params['only_topk'] = new_params.pop('limit')
+        train_params = XTransformer.TrainParams.from_dict(
+            new_params,
+            recursive=True).to_dict()
+        pred_params = XTransformer.PredParams.from_dict(
+            new_params,
+            recursive=True).to_dict()
+
+        self.info('Start training')
+        # enable progress
+        matcher.LOGGER.setLevel(logging.INFO)
+        matcher.LOGGER.addHandler(logging.StreamHandler(stream=stdout))
+        self._model = XTransformer.train(
+            MLProblemWithText(train_txts, train_y, X_feat=train_X),
+            clustering=None,
+            val_prob=None,
+            train_params=train_params,
+            pred_params=pred_params,
+            beam_size=params['beam_size'],
+            steps_scale=None,
+            label_feat=None,
+        )
+        atomic_save(self._model, model_path, None)
+
+    def _train(self, corpus, params, jobs=0):
+        if corpus == 'cached':
+            self.info("Reusing cached training data from previous run.")
+        else:
+            if corpus.is_empty():
+                raise NotSupportedException(
+                    'Cannot train project with no documents')
+            input = (doc.text for doc in corpus.documents)
+            vecparams = {'min_df': int(params['min_df']),
+                         'tokenizer': self.project.analyzer.tokenize_words,
+                         'ngram_range': (1, int(params['ngram']))}
+            veccorpus = self.create_vectorizer(input, vecparams)
+            self._create_train_files(veccorpus, corpus)
+        self._create_model(params, jobs)
+
+    def _suggest(self, text, params):
+        text = ' '.join(text.split())
+        vector = self.vectorizer.transform([text])
+        if vector.nnz == 0:  # All zero vector, empty result
+            return
ListSuggestionResult([]) + new_params = apply_param_parse_config( + self.PARAM_CONFIG, + params + ) + prediction = self._model.predict( + [text], + X_feat=vector.sorted_indices(), + batch_size=params['batch_size'], + use_gpu=new_params['use_gpu'], + only_top_k=new_params['limit'], + post_processor=new_params['post_processor']) + results = [] + for idx, score in zip(prediction.indices, prediction.data): + subject = self.project.subjects[idx] + results.append(SubjectSuggestion( + uri=subject[0], + label=subject[1], + notation=subject[2], + score=score + )) + return ListSuggestionResult(results) diff --git a/setup.py b/setup.py index 3d279f3b1..9c947100b 100644 --- a/setup.py +++ b/setup.py @@ -53,6 +53,7 @@ def read(fname): 'yake': ['yake==0.4.5'], 'pycld3': ['pycld3'], 'spacy': ['spacy==3.3.*'], + 'pecos': ['libpecos==0.2.3'], 'dev': [ 'codecov', 'coverage<=6.2', diff --git a/tests/test_backend_xtransformer.py b/tests/test_backend_xtransformer.py new file mode 100644 index 000000000..bbd2de9cd --- /dev/null +++ b/tests/test_backend_xtransformer.py @@ -0,0 +1,281 @@ +"""Unit tests for the XTransformer backend in Annif""" + +from scipy.sparse import load_npz, csr_matrix + +from os import mknod +import os.path as osp +import pytest +from unittest.mock import MagicMock, patch +import annif.backend +import annif.corpus +from annif.exception import NotInitializedException, NotSupportedException + + +pytest.importorskip('annif.backend.xtransformer') +XTransformer = annif.backend.xtransformer.XTransformer + + +@pytest.fixture +def mocked_xtransformer(datadir, project): + model_mock = MagicMock() + model_mock.save.side_effect = lambda x: mknod(osp.join(x, 'test')) + + return patch.object( + annif.backend.xtransformer.XTransformer, + 'train', + return_value=model_mock) + + +def test_xtransformer_default_params(project): + backend_type = annif.backend.get_backend('xtransformer') + xtransformer = backend_type( + backend_id='xtransfomer', + config_params={}, + project=project + ) + expected = { + 'min_df': 1, + 'ngram': 1, + 'fix_clustering': False, + 'nr_splits': 16, + 'min_codes': None, + 'max_leaf_size': 100, + 'imbalanced_ratio': 0.0, + 'imbalanced_depth': 100, + 'max_match_clusters': 32768, + 'do_fine_tune': True, + # 'model_shortcut': 'distilbert-base-multilingual-cased', + 'model_shortcut': 'bert-base-multilingual-uncased', + 'beam_size': 20, + 'limit': 100, + 'post_processor': 'sigmoid', + 'negative_sampling': 'tfn', + 'ensemble_method': 'transformer-only', + 'threshold': 0.1, + 'loss_function': 'squared-hinge', + 'truncate_length': 128, + 'hidden_droput_prob': 0.1, + 'batch_size': 32, + 'gradient_accumulation_steps': 1, + 'learning_rate': 1e-4, + 'weight_decay': 0.0, + 'adam_epsilon': 1e-8, + 'num_train_epochs': 1, + 'max_steps': 0, + 'lr_schedule': 'linear', + 'warmup_steps': 0, + 'logging_steps': 100, + 'save_steps': 1000, + 'max_active_matching_labels': None, + 'max_num_labels_in_gpu': 65536, + 'use_gpu': True, + 'bootstrap_model': 'linear' + } + actual = xtransformer.params + assert len(actual) == len(expected) + for param, val in expected.items(): + assert param in actual and actual[param] == val + + +def test_xtransformer_suggest_no_vectorizer(project): + backend_type = annif.backend.get_backend('xtransformer') + xtransformer = backend_type( + backend_id='xtransfomer', + config_params={}, + project=project + ) + with pytest.raises(NotInitializedException): + xtransformer.suggest('example text') + + +def test_xtransformer_create_train_files(tmpdir, project, datadir): + tmpfile = 
tmpdir.join('document.tsv') + tmpfile.write("nonexistent\thttp://example.com/nonexistent\n" + + "arkeologia\thttp://www.yso.fi/onto/yso/p1265\n" + + "...\thttp://example.com/none") + corpus = annif.corpus.DocumentFile(str(tmpfile)) + backend_type = annif.backend.get_backend('xtransformer') + xtransformer = backend_type( + backend_id='xtransformer', + config_params={}, + project=project) + input = (doc.text for doc in corpus.documents) + veccorpus = xtransformer.create_vectorizer(input, {}) + xtransformer._create_train_files(veccorpus, corpus) + assert datadir.join('xtransformer-train-X.npz').exists() + assert datadir.join('xtransformer-train-y.npz').exists() + assert datadir.join('xtransformer-train-raw.txt').exists() + traindata = datadir.join('xtransformer-train-raw.txt').read().splitlines() + assert len(traindata) == 1 + train_features = load_npz(str(datadir.join('xtransformer-train-X.npz'))) + assert train_features.shape[0] == 1 + train_labels = load_npz(str(datadir.join('xtransformer-train-y.npz'))) + assert train_labels.shape[0] == 1 + + +def test_xtransformer_train( + datadir, + document_corpus, + project, + mocked_xtransformer): + backend_type = annif.backend.get_backend('xtransformer') + xtransformer = backend_type( + backend_id='xtransfomer', + config_params={}, + project=project + ) + + with mocked_xtransformer as train_mock: + xtransformer.train(document_corpus) + + train_mock.assert_called_once() + first_arg = train_mock.call_args.args[0] + kwargs = train_mock.call_args.kwargs + assert len(first_arg.X_text) == 6397 + assert first_arg.X_feat.shape == (6397, 12480) + assert first_arg.Y.shape == (6397, 130) + expected_pred_params = XTransformer.PredParams.from_dict( + { + 'beam_size': 20, + 'only_topk': 100, + 'post_processor': 'sigmoid', + 'truncate_length': 128, + }, + recursive=True).to_dict() + + expected_train_params = XTransformer.TrainParams.from_dict( + { + 'do_fine_tune': True, + 'only_encoder': False, + 'fix_clustering': False, + 'max_match_clusters': 32768, + 'nr_splits': 16, + 'max_leaf_size': 100, + 'imbalanced_ratio': 0.0, + 'imbalanced_depth': 100, + 'max_match_clusters': 32768, + 'do_fine_tune': True, + 'model_shortcut': 'bert-base-multilingual-uncased', + # 'model_shortcut': 'distilbert-base-multilingual-cased', + 'post_processor': 'sigmoid', + 'negative_sampling': 'tfn', + 'ensemble_method': 'transformer-only', + 'threshold': 0.1, + 'loss_function': 'squared-hinge', + 'truncate_length': 128, + 'hidden_droput_prob': 0.1, + 'batch_size': 32, + 'gradient_accumulation_steps': 1, + 'learning_rate': 1e-4, + 'weight_decay': 0.0, + 'adam_epsilon': 1e-8, + 'num_train_epochs': 1, + 'max_steps': 0, + 'lr_schedule': 'linear', + 'warmup_steps': 0, + 'logging_steps': 100, + 'save_steps': 1000, + 'max_active_matching_labels': None, + 'max_num_labels_in_gpu': 65536, + 'use_gpu': True, + 'bootstrap_model': 'linear', + }, + recursive=True).to_dict() + + assert kwargs == { + 'clustering': None, + 'val_prob': None, + 'steps_scale': None, + 'label_feat': None, + 'beam_size': 20, + 'pred_params': expected_pred_params, + 'train_params': expected_train_params + } + xtransformer._model.save.assert_called_once() + assert datadir.join('xtransformer-model').check() + + +def test_xtransformer_train_cached(mocked_xtransformer, datadir, project): + backend_type = annif.backend.get_backend('xtransformer') + xtransformer = backend_type( + backend_id='xtransfomer', + config_params={}, + project=project + ) + xtransformer._create_train_files = MagicMock() + xtransformer._create_model = 
MagicMock() + with mocked_xtransformer: + xtransformer.train('cached') + xtransformer._create_train_files.assert_not_called() + xtransformer._create_model.assert_called_once() + + +def test_xtransfomer_train_no_documents(datadir, project, empty_corpus): + backend_type = annif.backend.get_backend('xtransformer') + xtransformer = backend_type( + backend_id='xtransfomer', + config_params={}, + project=project + ) + with pytest.raises(NotSupportedException): + xtransformer.train(empty_corpus) + + +def test_xtransformer_suggest(project): + backend_type = annif.backend.get_backend('xtransformer') + xtransformer = backend_type( + backend_id='xtransfomer', + config_params={}, + project=project + ) + xtransformer._model = MagicMock() + xtransformer._model.predict.return_value = csr_matrix( + [0, 0.2, 0, 0, 0, 0.5, 0] + ) + result = xtransformer.suggest("""Arkeologiaa sanotaan joskus myös + muinaistutkimukseksi tai muinaistieteeksi. Se on humanistinen tiede + tai oikeammin joukko tieteitä, jotka tutkivat ihmisen menneisyyttä. + Tutkimusta tehdään analysoimalla muinaisjäännöksiä eli niitä jälkiä, + joita ihmisten toiminta on jättänyt maaperään tai vesistöjen + pohjaan.""") + xtransformer._model.predict.assert_called_once() + + expected = [ + annif.suggestion.SubjectSuggestion( + uri=project.subjects._uris[1], + label=project.subjects._labels[1], + notation=None, + score=0.2 + ), + annif.suggestion.SubjectSuggestion( + uri=project.subjects._uris[5], + label=project.subjects._labels[5], + notation=None, + score=0.5 + ) + ] + assert result.as_list(None) == expected + + +def test_xtransformer_suggest_no_input(project, datadir): + backend_type = annif.backend.get_backend('xtransformer') + xtransformer = backend_type( + backend_id='xtransfomer', + config_params={'limit': 5}, + project=project + ) + xtransformer._model = MagicMock() + results = xtransformer.suggest('j') + assert len(results.as_list(None)) == 0 + + +def test_xtransformer_suggest_no_model(datadir, project): + backend_type = annif.backend.get_backend('xtransformer') + xtransformer = backend_type( + backend_id='xtransfomer', + config_params={}, + project=project + ) + datadir.remove() + with pytest.raises(NotInitializedException): + xtransformer.suggest('example text') From 5a18d987ece54857b51a05c549dcee01fa359b5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?= Date: Wed, 8 Dec 2021 08:37:59 +0000 Subject: [PATCH 04/18] Remove redundant import in fasttext --- annif/backend/fasttext.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/annif/backend/fasttext.py b/annif/backend/fasttext.py index b39e59599..1662191c7 100644 --- a/annif/backend/fasttext.py +++ b/annif/backend/fasttext.py @@ -5,7 +5,6 @@ import annif.util from annif.suggestion import SubjectSuggestion, ListSuggestionResult from annif.exception import NotInitializedException, NotSupportedException -from annif.util import apply_param_parse_config import fasttext from . import backend from . 
import mixins @@ -112,7 +111,8 @@ def _create_model(self, params, jobs): self.info('creating fastText model') trainpath = os.path.join(self.datadir, self.TRAIN_FILE) modelpath = os.path.join(self.datadir, self.MODEL_FILE) - params = apply_param_parse_config(self.FASTTEXT_PARAMS, params) + params = annif.util.apply_param_parse_config( + self.FASTTEXT_PARAMS, params) if jobs != 0: # jobs set by user to non-default value params['thread'] = jobs self.debug('Model parameters: {}'.format(params)) From 612996502631d4daff9ca44e16ecd1a239897d9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?= Date: Wed, 8 Dec 2021 08:54:03 +0000 Subject: [PATCH 05/18] Use parsed parameter in suggest batch_size. --- annif/backend/xtransformer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index 0abc60c78..fae2ed546 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -232,7 +232,7 @@ def _suggest(self, text, params): prediction = self._model.predict( [text], X_feat=vector.sorted_indices(), - batch_size=params['batch_size'], + batch_size=new_params['batch_size'], use_gpu=new_params['use_gpu'], only_top_k=new_params['limit'], post_processor=new_params['post_processor']) From 02ff7729c08d2b465ed30baaddf2be35a7fd7170 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?= Date: Fri, 17 Dec 2021 14:31:32 +0000 Subject: [PATCH 06/18] Use provided parameters in xtransformer training. --- annif/backend/xtransformer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index fae2ed546..a44c8a711 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -180,7 +180,7 @@ def _create_model(self, params, jobs): model_path = osp.join(self.datadir, self.model_folder) new_params = apply_param_parse_config( self.PARAM_CONFIG, - self.DEFAULT_PARAMETERS) + self.params) new_params['only_topk'] = new_params.pop('limit') train_params = XTransformer.TrainParams.from_dict( new_params, From 3d06ebeea960041fb825eedea1e3afb272a900d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?= Date: Thu, 6 Jan 2022 09:49:22 +0000 Subject: [PATCH 07/18] Fix import for Xtransformer --- annif/backend/__init__.py | 2 +- tests/test_backend.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/annif/backend/__init__.py b/annif/backend/__init__.py index 56f6c05c8..25c6a695d 100644 --- a/annif/backend/__init__.py +++ b/annif/backend/__init__.py @@ -70,7 +70,7 @@ def _tfidf(): def _xtransformer(): try: from . 
import xtransformer - xtransformer.XTransformerBackend + return xtransformer.XTransformerBackend except ImportError: raise ValueError( "XTransformer not available, not enabling XTransformer backend") diff --git a/tests/test_backend.py b/tests/test_backend.py index 19d913773..6a8cbff5b 100644 --- a/tests/test_backend.py +++ b/tests/test_backend.py @@ -86,3 +86,11 @@ def test_get_backend_yake_not_installed(): with pytest.raises(ValueError) as excinfo: annif.backend.get_backend('yake') assert 'YAKE not available' in str(excinfo.value) + + +@pytest.mark.skipif(importlib.util.find_spec("pecos") is not None, + reason="test requires that YAKE is NOT installed") +def test_get_backend_xtransformer_not_installed(): + with pytest.raises(ValueError) as excinfo: + annif.backend.get_backend('xtransformer') + assert 'XTransformer not available' in str(excinfo.value) From 8555bab678bf8929df7702b1f3486c1841e44aba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?= Date: Thu, 6 Jan 2022 09:49:54 +0000 Subject: [PATCH 08/18] Split atomic_save in folder and directory variant. --- annif/backend/omikuji.py | 5 ++-- annif/backend/xtransformer.py | 5 ++-- annif/util.py | 50 ++++++++++++++++++++++++----------- tests/test_util.py | 5 +++- 4 files changed, 44 insertions(+), 21 deletions(-) diff --git a/annif/backend/omikuji.py b/annif/backend/omikuji.py index 3ff021f75..5a8c5b6a4 100644 --- a/annif/backend/omikuji.py +++ b/annif/backend/omikuji.py @@ -94,10 +94,9 @@ def _create_model(self, params, jobs): self._model = omikuji.Model.train_on_data( train_path, hyper_param, jobs or None) - annif.util.atomic_save( + annif.util.atomic_save_folder( self._model, - model_path, - None) + model_path) def _train(self, corpus, params, jobs=0): if corpus != 'cached': diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index a44c8a711..f76a1dfe9 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -10,7 +10,8 @@ from annif.suggestion import ListSuggestionResult, SubjectSuggestion from . import mixins from . import backend -from annif.util import boolean, apply_param_parse_config, atomic_save +from annif.util import boolean, apply_param_parse_config, atomic_save_folder, \ + atomic_save from pecos.xmc.xtransformer.model import XTransformer from pecos.xmc.xtransformer.module import MLProblemWithText @@ -203,7 +204,7 @@ def _create_model(self, params, jobs): steps_scale=None, label_feat=None, ) - atomic_save(self._model, model_path, None) + atomic_save_folder(self._model, model_path) def _train(self, corpus, params, jobs=0): if corpus == 'cached': diff --git a/annif/util.py b/annif/util.py index b7f471147..654e20c5b 100644 --- a/annif/util.py +++ b/annif/util.py @@ -14,27 +14,47 @@ def atomic_save(obj, dirname, filename, method=None): """Save the given object (which must have a .save() method, unless the method parameter is given) into the given directory with the given filename, using a temporary file and renaming the temporary file to the - final name. To save a directory explicitly set filename=None.""" - - if filename: - prefix, suffix = os.path.splitext(filename) - tempfd, tempfilename = tempfile.mkstemp( - prefix=prefix, suffix=suffix, dir=dirname) - os.close(tempfd) - target_pth = os.path.join(dirname, filename) - else: - tldir = os.path.dirname(dirname.rstrip('/')) - os.makedirs(dirname, exist_ok=tldir) - tempdir = tempfile.TemporaryDirectory(dir=tldir) - tempfilename = tempdir.name - target_pth = dirname + final name. 
The .save() method or the function provided in the method argument
+    will be called with the path to the temporary file."""
+
+    prefix, suffix = os.path.splitext(filename)
+    tempfd, tempfilename = tempfile.mkstemp(
+        prefix=prefix, suffix=suffix, dir=dirname)
+    os.close(tempfd)
     logger.debug('saving %s to temporary file %s', str(obj)[:90], tempfilename)
     if method is not None:
         method(obj, tempfilename)
     else:
         obj.save(tempfilename)
     for fn in glob.glob(tempfilename + '*'):
-        newname = fn.replace(tempfilename, target_pth)
+        newname = fn.replace(tempfilename, os.path.join(dirname, filename))
+        logger.debug('renaming temporary file %s to %s', fn, newname)
+        os.rename(fn, newname)
+
+
+def atomic_save_folder(obj, dirname, method=None):
+    """Save the given object (which must have a .save() method, unless the
+    method parameter is given) into the given directory,
+    using a temporary directory and renaming the temporary directory to the
+    final name.
+    The .save() method or the function provided in the method argument
+    will be called with the path to the temporary directory."""
+
+    tldir = os.path.dirname(dirname.rstrip('/'))
+    os.makedirs(dirname, exist_ok=True)
+    tempdir = tempfile.TemporaryDirectory(dir=tldir)
+    temp_dir_name = tempdir.name
+    target_pth = dirname
+    logger.debug(
+        'saving %s to temporary directory %s', str(obj)[:90],
+        temp_dir_name)
+    if method is not None:
+        method(obj, temp_dir_name)
+    else:
+        obj.save(temp_dir_name)
+    for fn in glob.glob(temp_dir_name + '*'):
+        newname = fn.replace(temp_dir_name, target_pth)
         logger.debug('renaming temporary file %s to %s', fn, newname)
         if os.path.isdir(newname):
             rmtree(newname)
         os.replace(fn, newname)
diff --git a/tests/test_util.py b/tests/test_util.py
index 45a316979..56d28b40f 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -97,7 +97,10 @@ def save_folder(obj, pth):
         _save(None, osp.join(pth, fname_0))
         _save(None, osp.join(pth, fname_1))
     folder_path = tmpdir.join(folder_name)
-    annif.util.atomic_save(None, folder_path.strpath, None, method=save_folder)
+    annif.util.atomic_save_folder(
+        None,
+        folder_path.strpath,
+        method=save_folder)
     assert folder_path.exists()
     for f_name in [fname_0, fname_1]:
         f_pth = folder_path.join(f_name)
         assert f_pth.exists()
         with f_pth.open() as f:
             assert f.readlines() == ['test file content\n']

From c11ba38621390222766dafaa3e26e5e0bc3717d3 Mon Sep 17 00:00:00 2001
From: Moritz Fuerneisen
Date: Mon, 14 Mar 2022 09:41:23 +0100
Subject: [PATCH 09/18] Disable gpu use for xtransformer suggest.

---
 annif/backend/xtransformer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py
index f76a1dfe9..1d92a7f25 100644
--- a/annif/backend/xtransformer.py
+++ b/annif/backend/xtransformer.py
@@ -234,7 +234,7 @@ def _suggest(self, text, params):
             [text],
             X_feat=vector.sorted_indices(),
             batch_size=new_params['batch_size'],
-            use_gpu=new_params['use_gpu'],
+            use_gpu=False,
             only_top_k=new_params['limit'],
             post_processor=new_params['post_processor'])
         results = []

From 4a82ea2034c87efc9d441b1221a26f3ff2f8ae82 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?=
Date: Fri, 2 Sep 2022 14:05:41 +0200
Subject: [PATCH 10/18] Update pecos dependency.
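libpecos 0.2.3 predates the current pecos releases; pin the 0.4 series
instead and enable the pecos extra in the Docker images. The writable
/.cache directory is created because pecos fetches transformer models
from Hugging Face at runtime and needs a cache location when the
container runs as an unprivileged user.

As a sketch, the same extra can be pulled into a local source checkout
like this (assuming the extras name from setup.py stays `pecos`):

    pip install .[pecos]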
--- Dockerfile | 8 ++++++-- setup.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1f6849adc..bda433206 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM python:3.8-slim-bullseye AS builder LABEL maintainer="Juho Inkinen " SHELL ["/bin/bash", "-c"] -ARG optional_dependencies=dev,voikko,pycld3,fasttext,nn,omikuji,yake,spacy +ARG optional_dependencies=dev,voikko,pycld3,fasttext,nn,omikuji,yake,spacy,pecos # Bulding fastText needs some system packages RUN if [[ $optional_dependencies =~ "fasttext" ]]; then \ apt-get update && \ @@ -20,7 +20,7 @@ FROM python:3.8-slim-bullseye SHELL ["/bin/bash", "-c"] COPY --from=builder /usr/local/lib/python3.8 /usr/local/lib/python3.8 -ARG optional_dependencies=dev,voikko,pycld3,fasttext,nn,omikuji,yake,spacy +ARG optional_dependencies=dev,voikko,pycld3,fasttext,nn,omikuji,yake,spacy,pecos # Install system dependencies needed at runtime: RUN apt-get update && \ if [[ $optional_dependencies =~ "voikko" ]]; then \ @@ -49,6 +49,10 @@ RUN if [[ $optional_dependencies =~ "spacy" ]]; then \ python -m spacy download $model; \ done; \ fi +RUN if [[ $optional_dependencies =~ "pecos" ]]; then \ + mkdir /.cache -m a=rwx; \ + fi + # Install Annif by copying source and make the installation editable: COPY annif /Annif/annif diff --git a/setup.py b/setup.py index 9c947100b..0c4cbcb72 100644 --- a/setup.py +++ b/setup.py @@ -53,7 +53,7 @@ def read(fname): 'yake': ['yake==0.4.5'], 'pycld3': ['pycld3'], 'spacy': ['spacy==3.3.*'], - 'pecos': ['libpecos==0.2.3'], + 'pecos': ['libpecos==0.4.*'], 'dev': [ 'codecov', 'coverage<=6.2', From 367e493c27ab6e8dc6c5f4d6b20cc4360a3a99ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Moritz=20F=C3=BCrneisen?= Date: Fri, 2 Sep 2022 14:06:09 +0200 Subject: [PATCH 11/18] Adapt xtransformer backend to new vocab model. 
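Documents in the new vocab model expose resolved subject IDs via
doc.subject_set instead of URI lists, and SubjectSuggestion is keyed by
subject_id rather than uri/label/notation. The per-document label row is
now built directly from the subject IDs; schematically (mirroring the
hunk below, with sp/np being the scipy.sparse and numpy modules already
imported in this backend):

    # one-hot CSR row over the whole vocabulary for a single document
    row = sp.csr_matrix(
        (np.ones(len(subject_set)),        # data: 1.0 per assigned subject
         (np.zeros(len(subject_set)),      # row indices: single row 0
          [s for s in subject_set])),      # column indices: subject IDs
        shape=(1, len(self.project.subjects)),
        dtype=np.float32).sorted_indices()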
--- annif/backend/xtransformer.py | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index 1d92a7f25..983403fd6 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -80,8 +80,7 @@ class XTransformerBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): 'imbalanced_depth': 100, 'max_match_clusters': 32768, 'do_fine_tune': True, - # 'model_shortcut': 'distilbert-base-multilingual-cased', - 'model_shortcut': 'bert-base-multilingual-uncased', + 'model_shortcut': 'distilbert-base-multilingual-cased', 'beam_size': 20, 'limit': 100, 'post_processor': 'sigmoid', @@ -135,22 +134,18 @@ def _create_train_files(self, veccorpus, corpus): txt_pth = osp.join(self.datadir, self.train_txt_file) with open(txt_pth, 'w', encoding='utf-8') as txt_file: for doc, vector in zip(corpus.documents, veccorpus): - subject_ids = [ - self.project.subjects.by_uri(uri) - for uri - in doc.uris] - subject_ids = [s_id for s_id in subject_ids if s_id] - if not (subject_ids and doc.text): + subject_set = doc.subject_set + if not (subject_set and doc.text): continue # noqa print(' '.join(doc.text.split()), file=txt_file) Xs.append( sp.csr_matrix(vector, dtype=np.float32).sorted_indices()) ys.append( sp.csr_matrix(( - np.ones(len(subject_ids)), + np.ones(len(subject_set)), ( - np.zeros(len(subject_ids)), - subject_ids)), + np.zeros(len(subject_set)), + [s for s in subject_set])), shape=(1, len(self.project.subjects)), dtype=np.float32 ).sorted_indices()) @@ -239,11 +234,8 @@ def _suggest(self, text, params): post_processor=new_params['post_processor']) results = [] for idx, score in zip(prediction.indices, prediction.data): - subject = self.project.subjects[idx] results.append(SubjectSuggestion( - uri=subject[0], - label=subject[1], - notation=subject[2], + subject_id=idx, score=score )) return ListSuggestionResult(results) From efbb05cc07c2076e1b215b59a9f64897ac4d0799 Mon Sep 17 00:00:00 2001 From: Lakshmi-Bashyam Date: Wed, 6 Dec 2023 17:23:04 +0100 Subject: [PATCH 12/18] Working transformer backend --- annif/backend/xtransformer.py | 10 ++++++---- pyproject.toml | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index 3267496aa..1df71c230 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -7,7 +7,7 @@ import numpy as np import scipy.sparse as sp from pecos.utils.featurization.text.preprocess import Preprocessor -from pecos.xmc.xtransformer import matcher +from pecos.xmc.xtransformer import matcher, model from pecos.xmc.xtransformer.model import XTransformer from pecos.xmc.xtransformer.module import MLProblemWithText @@ -187,15 +187,17 @@ def _create_model(self, params, jobs): self.info("Start training") # enable progress - matcher.LOGGER.setLevel(logging.INFO) + matcher.LOGGER.setLevel(logging.DEBUG) matcher.LOGGER.addHandler(logging.StreamHandler(stream=stdout)) + model.LOGGER.setLevel(logging.DEBUG) + model.LOGGER.addHandler(logging.StreamHandler(stream=stdout)) self._model = XTransformer.train( MLProblemWithText(train_txts, train_y, X_feat=train_X), clustering=None, val_prob=None, train_params=train_params, pred_params=pred_params, - beam_size=params["beam_size"], + beam_size=int(params["beam_size"]), steps_scale=None, label_feat=None, ) @@ -227,7 +229,7 @@ def _suggest(self, text, params): [text], X_feat=vector.sorted_indices(), batch_size=new_params["batch_size"], - 
use_gpu=False, + use_gpu=True, only_top_k=new_params["limit"], post_processor=new_params["post_processor"], ) diff --git a/pyproject.toml b/pyproject.toml index fbd5cab8e..ce94578ad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ lmdb = {version = "1.4.0", optional = true} omikuji = {version = "0.5.*", optional = true} yake = {version = "0.4.5", optional = true} spacy = {version = "3.4.*", optional = true} -libpecos = {version = "0.4.*", optional = true} +libpecos = {version = "1.*", optional = true} [tool.poetry.dev-dependencies] py = "*" From 3e02a72f4a7ec6148edb01f0dab7879607d8dcb3 Mon Sep 17 00:00:00 2001 From: Lakshmi-Bashyam Date: Tue, 20 Aug 2024 17:33:39 +0200 Subject: [PATCH 13/18] xtrans test fixed, stwfsa import fixed --- annif/backend/stwfsa.py | 2 +- annif/backend/xtransformer.py | 2 +- tests/test_backend_xtransformer.py | 38 ++++++++++-------------------- 3 files changed, 14 insertions(+), 28 deletions(-) diff --git a/annif/backend/stwfsa.py b/annif/backend/stwfsa.py index 1c5309aa9..ec9b756cb 100644 --- a/annif/backend/stwfsa.py +++ b/annif/backend/stwfsa.py @@ -6,7 +6,7 @@ from stwfsapy.predictor import StwfsapyPredictor from annif.exception import NotInitializedException, NotSupportedException -from annif.suggestion import ListSuggestionResult, SubjectSuggestion +from annif.suggestion import SubjectSuggestion from annif.util import apply_param_parse_config, atomic_save, boolean from . import backend diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index 1be62ff89..a914b0823 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -93,7 +93,7 @@ class XTransformerBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): "imbalanced_depth": 100, "max_match_clusters": 32768, "do_fine_tune": True, - "model_shortcut": "distilbert-base-multilingual-cased", + "model_shortcut": "bert-base-multilingual-uncased", "beam_size": 20, "limit": 100, "post_processor": "sigmoid", diff --git a/tests/test_backend_xtransformer.py b/tests/test_backend_xtransformer.py index 90bb59b05..78628f476 100644 --- a/tests/test_backend_xtransformer.py +++ b/tests/test_backend_xtransformer.py @@ -90,7 +90,7 @@ def test_xtransformer_create_train_files(tmpdir, project, datadir): + "arkeologia\thttp://www.yso.fi/onto/yso/p1265\n" + "...\thttp://example.com/none" ) - corpus = annif.corpus.DocumentFile(str(tmpfile)) + corpus = annif.corpus.DocumentFile(str(tmpfile), project.subjects) backend_type = annif.backend.get_backend("xtransformer") xtransformer = backend_type( backend_id="xtransformer", config_params={}, project=project @@ -121,9 +121,9 @@ def test_xtransformer_train(datadir, document_corpus, project, mocked_xtransform train_mock.assert_called_once() first_arg = train_mock.call_args.args[0] kwargs = train_mock.call_args.kwargs - assert len(first_arg.X_text) == 6397 - assert first_arg.X_feat.shape == (6397, 12480) - assert first_arg.Y.shape == (6397, 130) + assert len(first_arg.X_text) == 6402 + assert first_arg.X_feat.shape == (6402, 12479) + assert first_arg.Y.shape == (6402, 130) expected_pred_params = XTransformer.PredParams.from_dict( { "beam_size": 20, @@ -144,8 +144,6 @@ def test_xtransformer_train(datadir, document_corpus, project, mocked_xtransform "max_leaf_size": 100, "imbalanced_ratio": 0.0, "imbalanced_depth": 100, - "max_match_clusters": 32768, - "do_fine_tune": True, "model_shortcut": "bert-base-multilingual-uncased", # 'model_shortcut': 'distilbert-base-multilingual-cased', "post_processor": "sigmoid", @@ -216,32 
+214,20 @@ def test_xtransformer_suggest(project): ) xtransformer._model = MagicMock() xtransformer._model.predict.return_value = csr_matrix([0, 0.2, 0, 0, 0, 0.5, 0]) - result = xtransformer.suggest( + results = xtransformer.suggest( + [ """Arkeologiaa sanotaan joskus myös muinaistutkimukseksi tai muinaistieteeksi. Se on humanistinen tiede tai oikeammin joukko tieteitä, jotka tutkivat ihmisen menneisyyttä. Tutkimusta tehdään analysoimalla muinaisjäännöksiä eli niitä jälkiä, joita ihmisten toiminta on jättänyt maaperään tai vesistöjen pohjaan.""" - ) + ] + )[0] xtransformer._model.predict.assert_called_once() - expected = [ - annif.suggestion.SubjectSuggestion( - uri=project.subjects._uris[1], - label=project.subjects._labels[1], - notation=None, - score=0.2, - ), - annif.suggestion.SubjectSuggestion( - uri=project.subjects._uris[5], - label=project.subjects._labels[5], - notation=None, - score=0.5, - ), - ] - assert result.as_list(None) == expected - + ship_finds = project.subjects.by_uri("http://www.yso.fi/onto/yso/p8869") + assert ship_finds in [result.subject_id for result in results] def test_xtransformer_suggest_no_input(project, datadir): backend_type = annif.backend.get_backend("xtransformer") @@ -249,8 +235,8 @@ def test_xtransformer_suggest_no_input(project, datadir): backend_id="xtransfomer", config_params={"limit": 5}, project=project ) xtransformer._model = MagicMock() - results = xtransformer.suggest("j") - assert len(results.as_list(None)) == 0 + results = xtransformer.suggest(["j"]) + assert len(results) == 0 def test_xtransformer_suggest_no_model(datadir, project): From 7379061c03ef661dedf6a4feb484b1bd0f96687e Mon Sep 17 00:00:00 2001 From: Lakshmi-Bashyam Date: Wed, 28 Aug 2024 15:14:17 +0200 Subject: [PATCH 14/18] Change default to smaller model --- annif/backend/xtransformer.py | 2 +- tests/test_backend_xtransformer.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index a914b0823..bc75198ec 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -93,7 +93,7 @@ class XTransformerBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): "imbalanced_depth": 100, "max_match_clusters": 32768, "do_fine_tune": True, - "model_shortcut": "bert-base-multilingual-uncased", + "model_shortcut": "distilbert-base-multilingual-uncased", "beam_size": 20, "limit": 100, "post_processor": "sigmoid", diff --git a/tests/test_backend_xtransformer.py b/tests/test_backend_xtransformer.py index 78628f476..2d4b1bc68 100644 --- a/tests/test_backend_xtransformer.py +++ b/tests/test_backend_xtransformer.py @@ -41,8 +41,8 @@ def test_xtransformer_default_params(project): "imbalanced_depth": 100, "max_match_clusters": 32768, "do_fine_tune": True, - # 'model_shortcut': 'distilbert-base-multilingual-cased', - "model_shortcut": "bert-base-multilingual-uncased", + 'model_shortcut': 'distilbert-base-multilingual-uncased', + # "model_shortcut": "bert-base-multilingual-uncased", "beam_size": 20, "limit": 100, "post_processor": "sigmoid", @@ -144,8 +144,8 @@ def test_xtransformer_train(datadir, document_corpus, project, mocked_xtransform "max_leaf_size": 100, "imbalanced_ratio": 0.0, "imbalanced_depth": 100, - "model_shortcut": "bert-base-multilingual-uncased", - # 'model_shortcut': 'distilbert-base-multilingual-cased', + # "model_shortcut": "bert-base-multilingual-uncased", + 'model_shortcut': 'distilbert-base-multilingual-uncased', "post_processor": "sigmoid", "negative_sampling": "tfn", 
"ensemble_method": "transformer-only", From 2078a653145b37117bec47d693c93081e423cdce Mon Sep 17 00:00:00 2001 From: Lakshmi-Bashyam Date: Thu, 19 Sep 2024 16:27:13 +0200 Subject: [PATCH 15/18] Fix linting errors --- annif/backend/xtransformer.py | 12 +++--------- annif/util.py | 1 - tests/test_backend_xtransformer.py | 2 +- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index bc75198ec..a0f70beea 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -3,7 +3,7 @@ import logging import os.path as osp from sys import stdout -from typing import TYPE_CHECKING, Any +from typing import Any import numpy as np import scipy.sparse as sp @@ -12,8 +12,9 @@ from pecos.xmc.xtransformer.model import XTransformer from pecos.xmc.xtransformer.module import MLProblemWithText +from annif.corpus.document import DocumentCorpus from annif.exception import NotInitializedException, NotSupportedException -from annif.suggestion import SuggestionBatch, SubjectSuggestion, vector_to_suggestions +from annif.suggestion import SubjectSuggestion, SuggestionBatch from annif.util import ( apply_param_parse_config, atomic_save, @@ -24,13 +25,6 @@ from . import backend, mixins -# if TYPE_CHECKING: -from collections.abc import Iterator - -from scipy.sparse._csr import csr_matrix - -from annif.corpus.document import DocumentCorpus - class XTransformerBackend(mixins.TfidfVectorizerMixin, backend.AnnifBackend): """XTransformer based backend for Annif""" diff --git a/annif/util.py b/annif/util.py index 0068a8c9a..bd1328fa2 100644 --- a/annif/util.py +++ b/annif/util.py @@ -9,7 +9,6 @@ import tempfile from shutil import rmtree from typing import Any, Callable -import numpy as np from annif import logger diff --git a/tests/test_backend_xtransformer.py b/tests/test_backend_xtransformer.py index 2d4b1bc68..a2db4c997 100644 --- a/tests/test_backend_xtransformer.py +++ b/tests/test_backend_xtransformer.py @@ -226,7 +226,7 @@ def test_xtransformer_suggest(project): )[0] xtransformer._model.predict.assert_called_once() - ship_finds = project.subjects.by_uri("http://www.yso.fi/onto/yso/p8869") + ship_finds = project.subjects.by_uri("https://www.yso.fi/onto/yso/p8869") assert ship_finds in [result.subject_id for result in results] def test_xtransformer_suggest_no_input(project, datadir): From f1b9c788185d67aec54b05344ac9af2a810ba3de Mon Sep 17 00:00:00 2001 From: Lakshmi-Bashyam Date: Wed, 25 Sep 2024 12:52:10 +0200 Subject: [PATCH 16/18] code formatting changes --- annif/backend/xtransformer.py | 1 - annif/util.py | 1 - tests/test_backend_xtransformer.py | 7 ++++--- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index a0f70beea..2ba3302e0 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -230,7 +230,6 @@ def _suggest_batch( self, texts: list[str], params: dict[str, Any] ) -> SuggestionBatch: vector = self.vectorizer.transform(texts) - if vector.nnz == 0: # All zero vector, empty result return list() new_params = apply_param_parse_config(self.PARAM_CONFIG, params) diff --git a/annif/util.py b/annif/util.py index bd1328fa2..b816d24d3 100644 --- a/annif/util.py +++ b/annif/util.py @@ -129,7 +129,6 @@ def apply_param_parse_config(configs, params): } - def boolean(val: Any) -> bool: """Convert the given value to a boolean True/False value, if it isn't already. 
True values are '1', 'yes', 'true', and 'on' (case insensitive), everything diff --git a/tests/test_backend_xtransformer.py b/tests/test_backend_xtransformer.py index a2db4c997..f3e7af760 100644 --- a/tests/test_backend_xtransformer.py +++ b/tests/test_backend_xtransformer.py @@ -41,7 +41,7 @@ def test_xtransformer_default_params(project): "imbalanced_depth": 100, "max_match_clusters": 32768, "do_fine_tune": True, - 'model_shortcut': 'distilbert-base-multilingual-uncased', + "model_shortcut": "distilbert-base-multilingual-uncased", # "model_shortcut": "bert-base-multilingual-uncased", "beam_size": 20, "limit": 100, @@ -145,7 +145,7 @@ def test_xtransformer_train(datadir, document_corpus, project, mocked_xtransform "imbalanced_ratio": 0.0, "imbalanced_depth": 100, # "model_shortcut": "bert-base-multilingual-uncased", - 'model_shortcut': 'distilbert-base-multilingual-uncased', + "model_shortcut": "distilbert-base-multilingual-uncased", "post_processor": "sigmoid", "negative_sampling": "tfn", "ensemble_method": "transformer-only", @@ -216,7 +216,7 @@ def test_xtransformer_suggest(project): xtransformer._model.predict.return_value = csr_matrix([0, 0.2, 0, 0, 0, 0.5, 0]) results = xtransformer.suggest( [ - """Arkeologiaa sanotaan joskus myös + """Arkeologiaa sanotaan joskus myös muinaistutkimukseksi tai muinaistieteeksi. Se on humanistinen tiede tai oikeammin joukko tieteitä, jotka tutkivat ihmisen menneisyyttä. Tutkimusta tehdään analysoimalla muinaisjäännöksiä eli niitä jälkiä, @@ -229,6 +229,7 @@ def test_xtransformer_suggest(project): ship_finds = project.subjects.by_uri("https://www.yso.fi/onto/yso/p8869") assert ship_finds in [result.subject_id for result in results] + def test_xtransformer_suggest_no_input(project, datadir): backend_type = annif.backend.get_backend("xtransformer") xtransformer = backend_type( From 5e41dce76e7125c8c8f2d46ba79b8f1174df453e Mon Sep 17 00:00:00 2001 From: Lakshmi-Bashyam Date: Wed, 25 Sep 2024 12:53:40 +0200 Subject: [PATCH 17/18] security bot fix --- annif/backend/xtransformer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/annif/backend/xtransformer.py b/annif/backend/xtransformer.py index 2ba3302e0..0ec5a42a7 100644 --- a/annif/backend/xtransformer.py +++ b/annif/backend/xtransformer.py @@ -2,7 +2,7 @@ import logging import os.path as osp -from sys import stdout +import sys from typing import Any import numpy as np @@ -190,9 +190,9 @@ def _create_model(self, params, jobs): self.info("Start training") # enable progress matcher.LOGGER.setLevel(logging.DEBUG) - matcher.LOGGER.addHandler(logging.StreamHandler(stream=stdout)) + matcher.LOGGER.addHandler(logging.StreamHandler(stream=sys.stdout)) model.LOGGER.setLevel(logging.DEBUG) - model.LOGGER.addHandler(logging.StreamHandler(stream=stdout)) + model.LOGGER.addHandler(logging.StreamHandler(stream=sys.stdout)) self._model = XTransformer.train( MLProblemWithText(train_txts, train_y, X_feat=train_X), clustering=None, From 4c33a31d7b9f4806c89122b288ad381bce3d3989 Mon Sep 17 00:00:00 2001 From: Lakshmi-Bashyam Date: Wed, 25 Sep 2024 12:57:34 +0200 Subject: [PATCH 18/18] typo fix --- tests/test_backend.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_backend.py b/tests/test_backend.py index c8d710e85..12b317733 100644 --- a/tests/test_backend.py +++ b/tests/test_backend.py @@ -97,7 +97,7 @@ def test_get_backend_yake_not_installed(): @pytest.mark.skipif( importlib.util.find_spec("pecos") is not None, - reason="test requires that YAKE is NOT installed", + 
reason="test requires that PECOS is NOT installed", ) def test_get_backend_xtransformer_not_installed(): with pytest.raises(ValueError) as excinfo: